diff --git a/Makefile b/Makefile
index fee0cdcc..ad7cbd0c 100644
--- a/Makefile
+++ b/Makefile
@@ -83,7 +83,7 @@ build: mserv mservctl ## Build server and client binary.
 .PHONY: build
 
 mserv:
-> go build -o bin/mserv -mod=vendor
+> go build -o bin/mserv
 .PHONY: mserv
 
 mservctl:
diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore
deleted file mode 100644
index 748e4c80..00000000
--- a/vendor/github.com/PuerkitoBio/purell/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.sublime-*
-.DS_Store
-*.swp
-*.swo
-tags
diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml
deleted file mode 100644
index cf31e6af..00000000
--- a/vendor/github.com/PuerkitoBio/purell/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-go:
- - 1.4.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - "1.10.x"
- - "1.11.x"
- - tip
diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE
deleted file mode 100644
index 4b9986de..00000000
--- a/vendor/github.com/PuerkitoBio/purell/LICENSE
+++ /dev/null
@@ -1,12 +0,0 @@
-Copyright (c) 2012, Martin Angers
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md
deleted file mode 100644
index 07de0c49..00000000
--- a/vendor/github.com/PuerkitoBio/purell/README.md
+++ /dev/null
@@ -1,188 +0,0 @@
-# Purell
-
-Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
-
-Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
-
-[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell)
-
-## Install
-
-`go get github.com/PuerkitoBio/purell`
-
-## Changelog
-
-* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor).
-* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
-* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
-* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. - -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc190..00000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ 
-1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a5..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b846245..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. 
-// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. -func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. 
-// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/.gitignore b/vendor/github.com/TykTechnologies/gojsonschema/.gitignore deleted file mode 100644 index c1e0636f..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.sw[nop] diff --git a/vendor/github.com/TykTechnologies/gojsonschema/.travis.yml b/vendor/github.com/TykTechnologies/gojsonschema/.travis.yml deleted file mode 100644 index 9cc01e8a..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 -before_install: - - go get github.com/sigu-399/gojsonreference - - go get github.com/sigu-399/gojsonpointer - - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/TykTechnologies/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/TykTechnologies/gojsonschema/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a4..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/TykTechnologies/gojsonschema/README.md b/vendor/github.com/TykTechnologies/gojsonschema/README.md deleted file mode 100644 index 127bdd16..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/README.md +++ /dev/null @@ -1,236 +0,0 @@ -[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) - -# gojsonschema - -## Description - -An implementation of JSON Schema, based on IETF's draft v4 - Go language - -References : - -* http://json-schema.org -* http://json-schema.org/latest/json-schema-core.html -* http://json-schema.org/latest/json-schema-validation.html - -## Installation - -``` -go get github.com/xeipuuv/gojsonschema -``` - -Dependencies : -* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer) -* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference) -* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package) - -## Usage - -### Example - -```go - -package main - -import ( - "fmt" - "github.com/xeipuuv/gojsonschema" -) - -func main() { - - schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") - documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json") - - result, err := gojsonschema.Validate(schemaLoader, documentLoader) - if err != nil { - panic(err.Error()) - } - - if result.Valid() { - fmt.Printf("The document is valid\n") - } else { - fmt.Printf("The document is not valid. see errors :\n") - for _, desc := range result.Errors() { - fmt.Printf("- %s\n", desc) - } - } - -} - - -``` - -#### Loaders - -There are various ways to load your JSON data. -In order to load your schemas and documents, -first declare an appropriate loader : - -* Web / HTTP, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json") -``` - -* Local file, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") -``` - -References use the URI scheme, the prefix (file://) and a full path to the file are required. - -* JSON strings : - -```go -loader := gojsonschema.NewStringLoader(`{"type": "string"}`) -``` - -* Custom Go types : - -```go -m := map[string]interface{}{"type": "string"} -loader := gojsonschema.NewGoLoader(m) -``` - -And - -```go -type Root struct { - Users []User `json:"users"` -} - -type User struct { - Name string `json:"name"` -} - -... - -data := Root{} -data.Users = append(data.Users, User{"John"}) -data.Users = append(data.Users, User{"Sophia"}) -data.Users = append(data.Users, User{"Bill"}) - -loader := gojsonschema.NewGoLoader(data) -``` - -#### Validation - -Once the loaders are set, validation is easy : - -```go -result, err := gojsonschema.Validate(schemaLoader, documentLoader) -``` - -Alternatively, you might want to load a schema only once and process to multiple validations : - -```go -schema, err := gojsonschema.NewSchema(schemaLoader) -... -result1, err := schema.Validate(documentLoader1) -... -result2, err := schema.Validate(documentLoader2) -... -// etc ... -``` - -To check the result : - -```go - if result.Valid() { - fmt.Printf("The document is valid\n") - } else { - fmt.Printf("The document is not valid. 
see errors :\n") - for _, err := range result.Errors() { - // Err implements the ResultError interface - fmt.Printf("- %s\n", err) - } - } -``` - -## Working with Errors - -The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it -```go -gojsonschema.Locale = YourCustomLocale{} -``` - -However, each error contains additional contextual information. - -**err.Type()**: *string* Returns the "type" of error that occurred. Note you can also type check. See below - -Note: An error of RequiredType has an err.Type() return value of "required" - - "required": RequiredError - "invalid_type": InvalidTypeError - "number_any_of": NumberAnyOfError - "number_one_of": NumberOneOfError - "number_all_of": NumberAllOfError - "number_not": NumberNotError - "missing_dependency": MissingDependencyError - "internal": InternalError - "enum": EnumError - "array_no_additional_items": ArrayNoAdditionalItemsError - "array_min_items": ArrayMinItemsError - "array_max_items": ArrayMaxItemsError - "unique": ItemsMustBeUniqueError - "array_min_properties": ArrayMinPropertiesError - "array_max_properties": ArrayMaxPropertiesError - "additional_property_not_allowed": AdditionalPropertyNotAllowedError - "invalid_property_pattern": InvalidPropertyPatternError - "string_gte": StringLengthGTEError - "string_lte": StringLengthLTEError - "pattern": DoesNotMatchPatternError - "multiple_of": MultipleOfError - "number_gte": NumberGTEError - "number_gt": NumberGTError - "number_lte": NumberLTEError - "number_lt": NumberLTError - -**err.Value()**: *interface{}* Returns the value given - -**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName - -**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix. - -**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation. - -**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()* - -Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e. -``` -{{.field}} must be greater than or equal to {{.min}} -``` - -## Formats -JSON Schema allows for optional "format" property to validate strings against well-known formats. gojsonschema ships with all of the formats defined in the spec that you can use like this: -````json -{"type": "string", "format": "email"} -```` -Available formats: date-time, hostname, email, ipv4, ipv6, uri. 
- -For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this: - -```go -// Define the format checker -type RoleFormatChecker struct {} - -// Ensure it meets the gojsonschema.FormatChecker interface -func (f RoleFormatChecker) IsFormat(input string) bool { - return strings.HasPrefix("ROLE_", input) -} - -// Add it to the library -gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{}) -```` - -Now to use in your json schema: -````json -{"type": "string", "format": "role"} -```` - -## Uses - -gojsonschema uses the following test suite : - -https://github.com/json-schema/JSON-Schema-Test-Suite diff --git a/vendor/github.com/TykTechnologies/gojsonschema/errors.go b/vendor/github.com/TykTechnologies/gojsonschema/errors.go deleted file mode 100644 index 1090decb..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/errors.go +++ /dev/null @@ -1,306 +0,0 @@ -package gojsonschema - -import ( - "bytes" - "sync" - "text/template" -) - -var errorTemplates errorTemplate = errorTemplate{template.New("errors-new"), sync.RWMutex{}} - -// template.Template is not thread-safe for writing, so some locking is done -// sync.RWMutex is used for efficiently locking when new templates are created -type errorTemplate struct { - *template.Template - sync.RWMutex -} - -type ( - // RequiredError. ErrorDetails: property string - RequiredError struct { - ResultErrorFields - } - - // InvalidTypeError. ErrorDetails: expected, given - InvalidTypeError struct { - ResultErrorFields - } - - // NumberAnyOfError. ErrorDetails: - - NumberAnyOfError struct { - ResultErrorFields - } - - // NumberOneOfError. ErrorDetails: - - NumberOneOfError struct { - ResultErrorFields - } - - // NumberAllOfError. ErrorDetails: - - NumberAllOfError struct { - ResultErrorFields - } - - // NumberNotError. ErrorDetails: - - NumberNotError struct { - ResultErrorFields - } - - // MissingDependencyError. ErrorDetails: dependency - MissingDependencyError struct { - ResultErrorFields - } - - // InternalError. ErrorDetails: error - InternalError struct { - ResultErrorFields - } - - // EnumError. ErrorDetails: allowed - EnumError struct { - ResultErrorFields - } - - // ArrayNoAdditionalItemsError. ErrorDetails: - - ArrayNoAdditionalItemsError struct { - ResultErrorFields - } - - // ArrayMinItemsError. ErrorDetails: min - ArrayMinItemsError struct { - ResultErrorFields - } - - // ArrayMaxItemsError. ErrorDetails: max - ArrayMaxItemsError struct { - ResultErrorFields - } - - // ItemsMustBeUniqueError. ErrorDetails: type - ItemsMustBeUniqueError struct { - ResultErrorFields - } - - // ArrayMinPropertiesError. ErrorDetails: min - ArrayMinPropertiesError struct { - ResultErrorFields - } - - // ArrayMaxPropertiesError. ErrorDetails: max - ArrayMaxPropertiesError struct { - ResultErrorFields - } - - // AdditionalPropertyNotAllowedError. ErrorDetails: property - AdditionalPropertyNotAllowedError struct { - ResultErrorFields - } - - // InvalidPropertyPatternError. ErrorDetails: property, pattern - InvalidPropertyPatternError struct { - ResultErrorFields - } - - // StringLengthGTEError. ErrorDetails: min - StringLengthGTEError struct { - ResultErrorFields - } - - // StringLengthLTEError. ErrorDetails: max - StringLengthLTEError struct { - ResultErrorFields - } - - // StringNumericGTEError. ErrorDetails: min_numeric - StringNumericGTEError struct { - ResultErrorFields - } - - // StringSpecialGTEError. 
ErrorDetails: min_special - StringSpecialGTEError struct { - ResultErrorFields - } - - // StringMultiCaseError. ErrorDetails: multi_case - StringMultiCaseError struct { - ResultErrorFields - } - - // StringSequentialError. ErrorDetails: disable_sequential - StringSequentialError struct { - ResultErrorFields - } - - // DoesNotMatchPatternError. ErrorDetails: pattern - DoesNotMatchPatternError struct { - ResultErrorFields - } - - // DoesNotMatchFormatError. ErrorDetails: format - DoesNotMatchFormatError struct { - ResultErrorFields - } - - // MultipleOfError. ErrorDetails: multiple - MultipleOfError struct { - ResultErrorFields - } - - // NumberGTEError. ErrorDetails: min - NumberGTEError struct { - ResultErrorFields - } - - // NumberGTError. ErrorDetails: min - NumberGTError struct { - ResultErrorFields - } - - // NumberLTEError. ErrorDetails: max - NumberLTEError struct { - ResultErrorFields - } - - // NumberLTError. ErrorDetails: max - NumberLTError struct { - ResultErrorFields - } -) - -// newError takes a ResultError type and sets the type, context, description, details, value, and field -func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) { - var t string - var d string - switch err.(type) { - case *RequiredError: - t = "required" - d = locale.Required() - case *InvalidTypeError: - t = "invalid_type" - d = locale.InvalidType() - case *NumberAnyOfError: - t = "number_any_of" - d = locale.NumberAnyOf() - case *NumberOneOfError: - t = "number_one_of" - d = locale.NumberOneOf() - case *NumberAllOfError: - t = "number_all_of" - d = locale.NumberAllOf() - case *NumberNotError: - t = "number_not" - d = locale.NumberNot() - case *MissingDependencyError: - t = "missing_dependency" - d = locale.MissingDependency() - case *InternalError: - t = "internal" - d = locale.Internal() - case *EnumError: - t = "enum" - d = locale.Enum() - case *ArrayNoAdditionalItemsError: - t = "array_no_additional_items" - d = locale.ArrayNoAdditionalItems() - case *ArrayMinItemsError: - t = "array_min_items" - d = locale.ArrayMinItems() - case *ArrayMaxItemsError: - t = "array_max_items" - d = locale.ArrayMaxItems() - case *ItemsMustBeUniqueError: - t = "unique" - d = locale.Unique() - case *ArrayMinPropertiesError: - t = "array_min_properties" - d = locale.ArrayMinProperties() - case *ArrayMaxPropertiesError: - t = "array_max_properties" - d = locale.ArrayMaxProperties() - case *AdditionalPropertyNotAllowedError: - t = "additional_property_not_allowed" - d = locale.AdditionalPropertyNotAllowed() - case *InvalidPropertyPatternError: - t = "invalid_property_pattern" - d = locale.InvalidPropertyPattern() - case *StringLengthGTEError: - t = "string_gte" - d = locale.StringGTE() - case *StringLengthLTEError: - t = "string_lte" - d = locale.StringLTE() - case *StringNumericGTEError: - t = "numeric_gte" - d = locale.NumericGTE() - case *StringSpecialGTEError: - t = "special_gte" - d = locale.SpecialGTE() - case *StringMultiCaseError: - t = "multi_case" - d = locale.MultiCase() - case *StringSequentialError: - t = "disable_sequential" - d = locale.Sequential() - case *DoesNotMatchPatternError: - t = "pattern" - d = locale.DoesNotMatchPattern() - case *DoesNotMatchFormatError: - t = "format" - d = locale.DoesNotMatchFormat() - case *MultipleOfError: - t = "multiple_of" - d = locale.MultipleOf() - case *NumberGTEError: - t = "number_gte" - d = locale.NumberGTE() - case *NumberGTError: - t = "number_gt" - d = locale.NumberGT() - case *NumberLTEError: - t = "number_lte" - 
d = locale.NumberLTE() - case *NumberLTError: - t = "number_lt" - d = locale.NumberLT() - } - - err.SetType(t) - err.SetContext(context) - err.SetValue(value) - err.SetDetails(details) - details["field"] = err.Field() - err.SetDescription(formatErrorDescription(d, details)) -} - -// formatErrorDescription takes a string in the default text/template -// format and converts it to a string with replacements. The fields come -// from the ErrorDetails struct and vary for each type of error. -func formatErrorDescription(s string, details ErrorDetails) string { - - var tpl *template.Template - var descrAsBuffer bytes.Buffer - var err error - - errorTemplates.RLock() - tpl = errorTemplates.Lookup(s) - errorTemplates.RUnlock() - - if tpl == nil { - errorTemplates.Lock() - tpl = errorTemplates.New(s) - - tpl, err = tpl.Parse(s) - errorTemplates.Unlock() - - if err != nil { - return err.Error() - } - } - - err = tpl.Execute(&descrAsBuffer, details) - if err != nil { - return err.Error() - } - - return descrAsBuffer.String() -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/format_checkers.go b/vendor/github.com/TykTechnologies/gojsonschema/format_checkers.go deleted file mode 100644 index c7214b04..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/format_checkers.go +++ /dev/null @@ -1,194 +0,0 @@ -package gojsonschema - -import ( - "net" - "net/url" - "reflect" - "regexp" - "strings" - "time" -) - -type ( - // FormatChecker is the interface all formatters added to FormatCheckerChain must implement - FormatChecker interface { - IsFormat(input string) bool - } - - // FormatCheckerChain holds the formatters - FormatCheckerChain struct { - formatters map[string]FormatChecker - } - - // EmailFormatter verifies email address formats - EmailFormatChecker struct{} - - // IPV4FormatChecker verifies IP addresses in the ipv4 format - IPV4FormatChecker struct{} - - // IPV6FormatChecker verifies IP addresses in the ipv6 format - IPV6FormatChecker struct{} - - // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6 - // - // Valid formats: - // Partial Time: HH:MM:SS - // Full Date: YYYY-MM-DD - // Full Time: HH:MM:SSZ-07:00 - // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700 - // - // Where - // YYYY = 4DIGIT year - // MM = 2DIGIT month ; 01-12 - // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year - // HH = 2DIGIT hour ; 00-23 - // MM = 2DIGIT ; 00-59 - // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules - // T = Literal - // Z = Literal - // - // Note: Nanoseconds are also suported in all formats - // - // http://tools.ietf.org/html/rfc3339#section-5.6 - DateTimeFormatChecker struct{} - - // URIFormatCheckers validates a URI with a valid Scheme per RFC3986 - URIFormatChecker struct{} - - // HostnameFormatChecker validates a hostname is in the correct format - HostnameFormatChecker struct{} - - // UUIDFormatChecker validates a UUID is in the correct format - UUIDFormatChecker struct{} - - // RegexFormatChecker validates a regex is in the correct format - RegexFormatChecker struct{} -) - -var ( - // Formatters holds the valid formatters, and is a public variable - // so library users can add custom formatters - FormatCheckers = FormatCheckerChain{ - formatters: map[string]FormatChecker{ - "date-time": DateTimeFormatChecker{}, - "hostname": HostnameFormatChecker{}, - "email": EmailFormatChecker{}, - "ipv4": IPV4FormatChecker{}, - "ipv6": IPV6FormatChecker{}, - "uri": URIFormatChecker{}, - "uuid": UUIDFormatChecker{}, - "regex": RegexFormatChecker{}, - }, - } - 
- // Regex credit: https://github.com/asaskevich/govalidator - rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$") - - // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname - rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) - - rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") -) - -// Add adds a FormatChecker to the FormatCheckerChain -// The name used will be the value used for the format key in your json schema -func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { - c.formatters[name] = f - - return c -} - -// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) -func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { - delete(c.formatters, name) - - return c -} - -// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name -func (c *FormatCheckerChain) Has(name string) bool { - _, ok := c.formatters[name] - - return ok -} - -// IsFormat will check an input against a FormatChecker with the given name -// to see if it is the correct format -func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { - f, ok := c.formatters[name] - - if !ok { - return false - } - - if !isKind(input, reflect.String) { - return false - } - - inputString := input.(string) - - return f.IsFormat(inputString) -} - -func (f EmailFormatChecker) IsFormat(input string) bool { - return rxEmail.MatchString(input) -} - -// Credit: https://github.com/asaskevich/govalidator -func (f IPV4FormatChecker) IsFormat(input string) bool { - ip := net.ParseIP(input) - return ip != nil && strings.Contains(input, ".") -} - -// Credit: https://github.com/asaskevich/govalidator -func (f IPV6FormatChecker) IsFormat(input string) bool { - ip := net.ParseIP(input) - return ip != nil && strings.Contains(input, ":") -} - -func (f DateTimeFormatChecker) IsFormat(input string) bool { - formats := []string{ - "15:04:05", - "15:04:05Z07:00", - "2006-01-02", - time.RFC3339, - time.RFC3339Nano, - } - - for _, format := range formats { - if _, err := time.Parse(format, input); err == nil { - return true - } - } - - return false -} - -func (f URIFormatChecker) IsFormat(input string) bool { - u, 
err := url.Parse(input) - if err != nil || u.Scheme == "" { - return false - } - - return true -} - -func (f HostnameFormatChecker) IsFormat(input string) bool { - return rxHostname.MatchString(input) && len(input) < 256 -} - -func (f UUIDFormatChecker) IsFormat(input string) bool { - return rxUUID.MatchString(input) -} - -// IsFormat implements FormatChecker interface. -func (f RegexFormatChecker) IsFormat(input string) bool { - if input == "" { - return true - } - _, err := regexp.Compile(input) - if err != nil { - return false - } - return true -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/glide.yaml b/vendor/github.com/TykTechnologies/gojsonschema/glide.yaml deleted file mode 100644 index 7aef8c09..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/glide.yaml +++ /dev/null @@ -1,12 +0,0 @@ -package: github.com/xeipuuv/gojsonschema -license: Apache 2.0 -import: -- package: github.com/xeipuuv/gojsonschema - -- package: github.com/xeipuuv/gojsonpointer - -- package: github.com/xeipuuv/gojsonreference - -- package: github.com/stretchr/testify/assert - version: ^1.1.3 - diff --git a/vendor/github.com/TykTechnologies/gojsonschema/internalLog.go b/vendor/github.com/TykTechnologies/gojsonschema/internalLog.go deleted file mode 100644 index 4ef7a8d0..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/internalLog.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Very simple log wrapper. -// Used for debugging/testing purposes. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "log" -) - -const internalLogEnabled = false - -func internalLog(format string, v ...interface{}) { - log.Printf(format, v...) -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/jsonContext.go b/vendor/github.com/TykTechnologies/gojsonschema/jsonContext.go deleted file mode 100644 index fcc8d9d6..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/jsonContext.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2013 MongoDB, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
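For reference, a minimal sketch of how the FormatCheckerChain above is extended with a custom checker; the "ulid" name, the regular expression, and the sample value are illustrative only and not part of the library:

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/TykTechnologies/gojsonschema"
)

// ulidChecker is a hypothetical checker: any type with an
// IsFormat(string) bool method satisfies the FormatChecker interface.
type ulidChecker struct{}

var ulidRx = regexp.MustCompile(`^[0-9A-HJKMNP-TV-Z]{26}$`)

func (ulidChecker) IsFormat(input string) bool {
	return ulidRx.MatchString(input)
}

func main() {
	// Register the checker under the name schemas will reference via "format".
	gojsonschema.FormatCheckers.Add("ulid", ulidChecker{})

	fmt.Println(gojsonschema.FormatCheckers.Has("ulid"))                                    // true
	fmt.Println(gojsonschema.FormatCheckers.IsFormat("ulid", "01ARZ3NDEKTSV4RRFFQ69G5FAV")) // true
	fmt.Println(gojsonschema.FormatCheckers.IsFormat("ulid", "not-a-ulid"))                 // false
}
```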
- -// author tolsen -// author-github https://github.com/tolsen -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context -// -// created 04-09-2013 - -package gojsonschema - -import "bytes" - -// jsonContext implements a persistent linked-list of strings -type jsonContext struct { - head string - tail *jsonContext -} - -func newJsonContext(head string, tail *jsonContext) *jsonContext { - return &jsonContext{head, tail} -} - -// String displays the context in reverse. -// This plays well with the data structure's persistent nature with -// Cons and a json document's tree structure. -func (c *jsonContext) String(del ...string) string { - byteArr := make([]byte, 0, c.stringLen()) - buf := bytes.NewBuffer(byteArr) - c.writeStringToBuffer(buf, del) - - return buf.String() -} - -func (c *jsonContext) stringLen() int { - length := 0 - if c.tail != nil { - length = c.tail.stringLen() + 1 // add 1 for "." - } - - length += len(c.head) - return length -} - -func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { - if c.tail != nil { - c.tail.writeStringToBuffer(buf, del) - - if len(del) > 0 { - buf.WriteString(del[0]) - } else { - buf.WriteString(".") - } - } - - buf.WriteString(c.head) -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/jsonLoader.go b/vendor/github.com/TykTechnologies/gojsonschema/jsonLoader.go deleted file mode 100644 index cab6ed05..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/jsonLoader.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Different strategies to load JSON files. -// Includes References (file and HTTP), JSON strings and Go types. 
-// -// created 01-02-2015 - -package gojsonschema - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/xeipuuv/gojsonreference" -) - -var osFS = osFileSystem(os.Open) - -// JSON loader interface - -type JSONLoader interface { - JsonSource() interface{} - LoadJSON() (interface{}, error) - JsonReference() (gojsonreference.JsonReference, error) - LoaderFactory() JSONLoaderFactory -} - -type JSONLoaderFactory interface { - New(source string) JSONLoader -} - -type DefaultJSONLoaderFactory struct { -} - -type FileSystemJSONLoaderFactory struct { - fs http.FileSystem -} - -func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: f.fs, - source: source, - } -} - -// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. -type osFileSystem func(string) (*os.File, error) - -func (o osFileSystem) Open(name string) (http.File, error) { - return o(name) -} - -// JSON Reference loader -// references are used to load JSONs from files and HTTP - -type jsonReferenceLoader struct { - fs http.FileSystem - source string -} - -func (l *jsonReferenceLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference(l.JsonSource().(string)) -} - -func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { - return &FileSystemJSONLoaderFactory{ - fs: l.fs, - } -} - -// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. -func NewReferenceLoader(source string) *jsonReferenceLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
-func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader { - return &jsonReferenceLoader{ - fs: fs, - source: source, - } -} - -func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { - - var err error - - reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) - if err != nil { - return nil, err - } - - refToUrl := reference - refToUrl.GetUrl().Fragment = "" - - var document interface{} - - if reference.HasFileScheme { - - filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1) - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, use slashes - // instead of backslashes, and have spaces escaped - if strings.HasPrefix(filename, "/") { - filename = filename[1:] - } - filename = filepath.FromSlash(filename) - } - - document, err = l.loadFromFile(filename) - if err != nil { - return nil, err - } - - } else { - - document, err = l.loadFromHTTP(refToUrl.String()) - if err != nil { - return nil, err - } - - } - - return document, nil - -} - -func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { - - resp, err := http.Get(address) - if err != nil { - return nil, err - } - - // must return HTTP Status 200 OK - if resp.StatusCode != http.StatusOK { - return nil, errors.New(formatErrorDescription(Locale.httpBadStatus(), ErrorDetails{"status": resp.Status})) - } - - bodyBuff, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) - -} - -func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { - f, err := l.fs.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - bodyBuff, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) - -} - -// JSON string loader - -type jsonStringLoader struct { - source string -} - -func (l *jsonStringLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func NewStringLoader(source string) *jsonStringLoader { - return &jsonStringLoader{source: source} -} - -func (l *jsonStringLoader) LoadJSON() (interface{}, error) { - - return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string))) - -} - -// JSON bytes loader - -type jsonBytesLoader struct { - source []byte -} - -func (l *jsonBytesLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func NewBytesLoader(source []byte) *jsonBytesLoader { - return &jsonBytesLoader{source: source} -} - -func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { - return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) -} - -// JSON Go (types) loader -// used to load JSONs from the code as maps, interface{}, structs ... 
- -type jsonGoLoader struct { - source interface{} -} - -func (l *jsonGoLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func NewGoLoader(source interface{}) *jsonGoLoader { - return &jsonGoLoader{source: source} -} - -func (l *jsonGoLoader) LoadJSON() (interface{}, error) { - - // convert it to a compliant JSON first to avoid types "mismatches" - - jsonBytes, err := json.Marshal(l.JsonSource()) - if err != nil { - return nil, err - } - - return decodeJsonUsingNumber(bytes.NewReader(jsonBytes)) - -} - -type jsonIOLoader struct { - buf *bytes.Buffer -} - -func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) -} - -func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) -} - -func (l *jsonIOLoader) JsonSource() interface{} { - return l.buf.String() -} - -func (l *jsonIOLoader) LoadJSON() (interface{}, error) { - return decodeJsonUsingNumber(l.buf) -} - -func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func decodeJsonUsingNumber(r io.Reader) (interface{}, error) { - - var document interface{} - - decoder := json.NewDecoder(r) - decoder.UseNumber() - - err := decoder.Decode(&document) - if err != nil { - return nil, err - } - - return document, nil - -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/locales.go b/vendor/github.com/TykTechnologies/gojsonschema/locales.go deleted file mode 100644 index 1c3171d1..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/locales.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const string and messages. 
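The loader constructors above are interchangeable: anything implementing JSONLoader can feed NewSchema. A small sketch, with the schema contents and file path purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/TykTechnologies/gojsonschema"
)

func main() {
	// The same schema supplied three ways: as a JSON string, as a Go value,
	// and as a reference that is resolved lazily when the schema is parsed.
	fromString := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	fromGo := gojsonschema.NewGoLoader(map[string]interface{}{
		"type":     "object",
		"required": []string{"name"},
	})
	fromRef := gojsonschema.NewReferenceLoader("file:///tmp/example-schema.json") // illustrative path
	_ = fromRef

	for _, l := range []gojsonschema.JSONLoader{fromString, fromGo} {
		if _, err := gojsonschema.NewSchema(l); err != nil {
			fmt.Println("schema error:", err)
		}
	}
}
```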
-// -// created 01-01-2015 - -package gojsonschema - -type ( - // locale is an interface for definining custom error strings - locale interface { - Required() string - InvalidType() string - NumberAnyOf() string - NumberOneOf() string - NumberAllOf() string - NumberNot() string - MissingDependency() string - Internal() string - Enum() string - ArrayNotEnoughItems() string - ArrayNoAdditionalItems() string - ArrayMinItems() string - ArrayMaxItems() string - Unique() string - ArrayMinProperties() string - ArrayMaxProperties() string - AdditionalPropertyNotAllowed() string - InvalidPropertyPattern() string - StringGTE() string - StringLTE() string - NumericGTE() string - SpecialGTE() string - MultiCase() string - Sequential() string - DoesNotMatchPattern() string - DoesNotMatchFormat() string - MultipleOf() string - NumberGTE() string - NumberGT() string - NumberLTE() string - NumberLT() string - - // Schema validations - RegexPattern() string - GreaterThanZero() string - MustBeOfA() string - MustBeOfAn() string - CannotBeUsedWithout() string - CannotBeGT() string - MustBeOfType() string - MustBeValidRegex() string - MustBeValidFormat() string - MustBeGTEZero() string - KeyCannotBeGreaterThan() string - KeyItemsMustBeOfType() string - KeyItemsMustBeUnique() string - ReferenceMustBeCanonical() string - NotAValidType() string - Duplicated() string - httpBadStatus() string - - // ErrorFormat - ErrorFormat() string - } - - // DefaultLocale is the default locale for this package - DefaultLocale struct{} -) - -func (l DefaultLocale) Required() string { - return `{{.property}} is required` -} - -func (l DefaultLocale) InvalidType() string { - return `Invalid type. Expected: {{.expected}}, given: {{.given}}` -} - -func (l DefaultLocale) NumberAnyOf() string { - return `Must validate at least one schema (anyOf)` -} - -func (l DefaultLocale) NumberOneOf() string { - return `Must validate one and only one schema (oneOf)` -} - -func (l DefaultLocale) NumberAllOf() string { - return `Must validate all the schemas (allOf)` -} - -func (l DefaultLocale) NumberNot() string { - return `Must not validate the schema (not)` -} - -func (l DefaultLocale) MissingDependency() string { - return `Has a dependency on {{.dependency}}` -} - -func (l DefaultLocale) Internal() string { - return `Internal Error {{.error}}` -} - -func (l DefaultLocale) Enum() string { - return `{{.field}} must be one of the following: {{.allowed}}` -} - -func (l DefaultLocale) ArrayNoAdditionalItems() string { - return `No additional items allowed on array` -} - -func (l DefaultLocale) ArrayNotEnoughItems() string { - return `Not enough items on array to match positional list of schema` -} - -func (l DefaultLocale) ArrayMinItems() string { - return `Array must have at least {{.min}} items` -} - -func (l DefaultLocale) ArrayMaxItems() string { - return `Array must have at most {{.max}} items` -} - -func (l DefaultLocale) Unique() string { - return `{{.type}} items must be unique` -} - -func (l DefaultLocale) ArrayMinProperties() string { - return `Must have at least {{.min}} properties` -} - -func (l DefaultLocale) ArrayMaxProperties() string { - return `Must have at most {{.max}} properties` -} - -func (l DefaultLocale) AdditionalPropertyNotAllowed() string { - return `Additional property {{.property}} is not allowed` -} - -func (l DefaultLocale) InvalidPropertyPattern() string { - return `Property "{{.property}}" does not match pattern {{.pattern}}` -} - -func (l DefaultLocale) StringGTE() string { - return `String length must be greater 
than or equal to {{.min}}` -} - -func (l DefaultLocale) StringLTE() string { - return `String length must be less than or equal to {{.max}}` -} - -func (l DefaultLocale) NumericGTE() string { - return `String must include at least {{.min_numeric}} numeric characters` -} - -func (l DefaultLocale) SpecialGTE() string { - return `String must include at least {{.min_special}} special characters (like '@', '$', '*' etc.)` -} - -func (l DefaultLocale) MultiCase() string { - return `String must include both lower and upper case characters` -} - -func (l DefaultLocale) Sequential() string { - return `String must not include sequential chars: {{.sequential_chars}}` -} - -func (l DefaultLocale) DoesNotMatchPattern() string { - return `Does not match pattern '{{.pattern}}'` -} - -func (l DefaultLocale) DoesNotMatchFormat() string { - return `Does not match format '{{.format}}'` -} - -func (l DefaultLocale) MultipleOf() string { - return `Must be a multiple of {{.multiple}}` -} - -func (l DefaultLocale) NumberGTE() string { - return `Must be greater than or equal to {{.min}}` -} - -func (l DefaultLocale) NumberGT() string { - return `Must be greater than {{.min}}` -} - -func (l DefaultLocale) NumberLTE() string { - return `Must be less than or equal to {{.max}}` -} - -func (l DefaultLocale) NumberLT() string { - return `Must be less than {{.max}}` -} - -// Schema validators -func (l DefaultLocale) RegexPattern() string { - return `Invalid regex pattern '{{.pattern}}'` -} - -func (l DefaultLocale) GreaterThanZero() string { - return `{{.number}} must be strictly greater than 0` -} - -func (l DefaultLocale) MustBeOfA() string { - return `{{.x}} must be of a {{.y}}` -} - -func (l DefaultLocale) MustBeOfAn() string { - return `{{.x}} must be of an {{.y}}` -} - -func (l DefaultLocale) CannotBeUsedWithout() string { - return `{{.x}} cannot be used without {{.y}}` -} - -func (l DefaultLocale) CannotBeGT() string { - return `{{.x}} cannot be greater than {{.y}}` -} - -func (l DefaultLocale) MustBeOfType() string { - return `{{.key}} must be of type {{.type}}` -} - -func (l DefaultLocale) MustBeValidRegex() string { - return `{{.key}} must be a valid regex` -} - -func (l DefaultLocale) MustBeValidFormat() string { - return `{{.key}} must be a valid format {{.given}}` -} - -func (l DefaultLocale) MustBeGTEZero() string { - return `{{.key}} must be greater than or equal to 0` -} - -func (l DefaultLocale) KeyCannotBeGreaterThan() string { - return `{{.key}} cannot be greater than {{.y}}` -} - -func (l DefaultLocale) KeyItemsMustBeOfType() string { - return `{{.key}} items must be {{.type}}` -} - -func (l DefaultLocale) KeyItemsMustBeUnique() string { - return `{{.key}} items must be unique` -} - -func (l DefaultLocale) ReferenceMustBeCanonical() string { - return `Reference {{.reference}} must be canonical` -} - -func (l DefaultLocale) NotAValidType() string { - return `{{.type}} is not a valid type -- ` -} - -func (l DefaultLocale) Duplicated() string { - return `{{.type}} type is duplicated` -} - -func (l DefaultLocale) httpBadStatus() string { - return `Could not read schema from HTTP, response status is {{.status}}` -} - -// Replacement options: field, description, context, value -func (l DefaultLocale) ErrorFormat() string { - return `{{.field}}: {{.description}}` -} - -const ( - STRING_NUMBER = "number" - STRING_ARRAY_OF_STRINGS = "array of strings" - STRING_ARRAY_OF_SCHEMAS = "array of schemas" - STRING_SCHEMA = "schema" - STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" - STRING_PROPERTIES 
= "properties" - STRING_DEPENDENCY = "dependency" - STRING_PROPERTY = "property" - STRING_UNDEFINED = "undefined" - STRING_CONTEXT_ROOT = "(root)" - STRING_ROOT_SCHEMA_PROPERTY = "(root)" -) diff --git a/vendor/github.com/TykTechnologies/gojsonschema/result.go b/vendor/github.com/TykTechnologies/gojsonschema/result.go deleted file mode 100644 index 6ad56ae8..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/result.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Result and ResultError implementations. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "fmt" - "strings" -) - -type ( - // ErrorDetails is a map of details specific to each error. - // While the values will vary, every error will contain a "field" value - ErrorDetails map[string]interface{} - - // ResultError is the interface that library errors must implement - ResultError interface { - Field() string - SetType(string) - Type() string - SetContext(*jsonContext) - Context() *jsonContext - SetDescription(string) - Description() string - SetValue(interface{}) - Value() interface{} - SetDetails(ErrorDetails) - Details() ErrorDetails - String() string - } - - // ResultErrorFields holds the fields for each ResultError implementation. - // ResultErrorFields implements the ResultError interface, so custom errors - // can be defined by just embedding this type - ResultErrorFields struct { - errorType string // A string with the type of error (i.e. invalid_type) - context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... - description string // A human readable error message - value interface{} // Value given by the JSON file that is the source of the error - details ErrorDetails - } - - Result struct { - errors []ResultError - // Scores how well the validation matched. Useful in generating - // better error messages for anyOf and oneOf. - score int - } -) - -// Field outputs the field name without the root context -// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName -func (v *ResultErrorFields) Field() string { - if p, ok := v.Details()["property"]; ok { - if str, isString := p.(string); isString { - return str - } - } - - return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") -} - -func (v *ResultErrorFields) SetType(errorType string) { - v.errorType = errorType -} - -func (v *ResultErrorFields) Type() string { - return v.errorType -} - -func (v *ResultErrorFields) SetContext(context *jsonContext) { - v.context = context -} - -func (v *ResultErrorFields) Context() *jsonContext { - return v.context -} - -func (v *ResultErrorFields) SetDescription(description string) { - v.description = description -} - -func (v *ResultErrorFields) Description() string { - return v.description -} - -func (v *ResultErrorFields) SetValue(value interface{}) { - v.value = value -} - -func (v *ResultErrorFields) Value() interface{} { - return v.value -} - -func (v *ResultErrorFields) SetDetails(details ErrorDetails) { - v.details = details -} - -func (v *ResultErrorFields) Details() ErrorDetails { - return v.details -} - -func (v ResultErrorFields) String() string { - // as a fallback, the value is displayed go style - valueString := fmt.Sprintf("%v", v.value) - - // marshal the go value value to json - if v.value == nil { - valueString = TYPE_NULL - } else { - if vs, err := marshalToJsonString(v.value); err == nil { - if vs == nil { - valueString = TYPE_NULL - } else { - valueString = *vs - } - } - } - - return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ - "context": v.context.String(), - "description": v.description, - "value": valueString, - "field": v.Field(), - }) -} - -func (v *Result) Valid() bool { - return len(v.errors) == 0 -} - -func (v *Result) Errors() []ResultError { - return v.errors -} - -func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) { - newError(err, context, value, Locale, details) - v.errors = append(v.errors, err) - v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function -} - -// Used to copy errors from a sub-schema to the main one -func (v *Result) mergeErrors(otherResult *Result) { - v.errors = append(v.errors, otherResult.Errors()...) - v.score += otherResult.score -} - -func (v *Result) incrementScore() { - v.score++ -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schema.go b/vendor/github.com/TykTechnologies/gojsonschema/schema.go deleted file mode 100644 index f7121291..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/schema.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
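A sketch of consuming the Result type above; it assumes the package-level Validate helper from the upstream xeipuuv/gojsonschema API, on which this fork is based, is also present:

```go
package main

import (
	"fmt"

	"github.com/TykTechnologies/gojsonschema"
)

func main() {
	schemaLoader := gojsonschema.NewStringLoader(`{
		"type": "object",
		"properties": {"age": {"type": "integer", "minimum": 0}},
		"required": ["age"]
	}`)
	documentLoader := gojsonschema.NewStringLoader(`{"age": -3}`)

	// Validate is assumed from the upstream API; it returns the *Result above.
	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		panic(err)
	}

	if !result.Valid() {
		for _, e := range result.Errors() {
			// Field, Type and Description are provided by ResultErrorFields;
			// String() would apply the locale's ErrorFormat template instead.
			fmt.Printf("%s (%s): %s\n", e.Field(), e.Type(), e.Description())
		}
	}
}
```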
- -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines Schema, the main entry to every subSchema. -// Contains the parsing logic and error checking. -// -// created 26-02-2013 - -package gojsonschema - -import ( - // "encoding/json" - "errors" - "reflect" - "regexp" - - "github.com/xeipuuv/gojsonreference" -) - -var ( - // Locale is the default locale to use - // Library users can overwrite with their own implementation - Locale locale = DefaultLocale{} -) - -func NewSchema(l JSONLoader) (*Schema, error) { - ref, err := l.JsonReference() - if err != nil { - return nil, err - } - - d := Schema{} - d.pool = newSchemaPool(l.LoaderFactory()) - d.documentReference = ref - d.referencePool = newSchemaReferencePool() - - var doc interface{} - if ref.String() != "" { - // Get document from schema pool - spd, err := d.pool.GetDocument(d.documentReference) - if err != nil { - return nil, err - } - doc = spd.Document - } else { - // Load JSON directly - doc, err = l.LoadJSON() - if err != nil { - return nil, err - } - d.pool.SetStandaloneDocument(doc) - } - - err = d.parse(doc) - if err != nil { - return nil, err - } - - return &d, nil -} - -type Schema struct { - documentReference gojsonreference.JsonReference - rootSchema *subSchema - pool *schemaPool - referencePool *schemaReferencePool -} - -func (d *Schema) parse(document interface{}) error { - d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY} - return d.parseSchema(document, d.rootSchema) -} - -func (d *Schema) SetRootSchemaName(name string) { - d.rootSchema.property = name -} - -// Parses a subSchema -// -// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring -// Not much magic involved here, most of the job is to validate the key names and their values, -// then the values are copied into subSchema struct -// -func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_OBJECT, - "given": STRING_SCHEMA, - }, - )) - } - - m := documentNode.(map[string]interface{}) - - if currentSchema == d.rootSchema { - currentSchema.ref = &d.documentReference - } - - // $subSchema - if existsMapKey(m, KEY_SCHEMA) { - if !isKind(m[KEY_SCHEMA], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_SCHEMA, - }, - )) - } - schemaRef := m[KEY_SCHEMA].(string) - schemaReference, err := gojsonreference.NewJsonReference(schemaRef) - currentSchema.subSchema = &schemaReference - if err != nil { - return err - } - } - - // $ref - if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_REF, - }, - )) - } - if k, ok := m[KEY_REF].(string); ok { - - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - - if jsonReference.HasFullUrl { - currentSchema.ref = &jsonReference - } else { - inheritedReference, err := currentSchema.ref.Inherits(jsonReference) - if err != nil { - return err - } - - currentSchema.ref = inheritedReference - } - - if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok { - currentSchema.refSchema = sch - - } else { - err := d.parseReference(documentNode, currentSchema, k) - if err != nil { - return err - } - - return nil - } - } - - // definitions - if existsMapKey(m, KEY_DEFINITIONS) { - if isKind(m[KEY_DEFINITIONS], reflect.Map) { - currentSchema.definitions = make(map[string]*subSchema) - for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { - if isKind(dv, reflect.Map) { - newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.definitions[dk] = newSchema - err := d.parseSchema(dv, newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - - } - - // id - if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_ID, - }, - )) - } - if k, ok := m[KEY_ID].(string); ok { - currentSchema.id = &k - } - - // title - if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_TITLE, - }, - )) - } - if k, ok := m[KEY_TITLE].(string); ok { - currentSchema.title = &k - } - - // description - if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - 
"expected": TYPE_STRING, - "given": KEY_DESCRIPTION, - }, - )) - } - if k, ok := m[KEY_DESCRIPTION].(string); ok { - currentSchema.description = &k - } - - // type - if existsMapKey(m, KEY_TYPE) { - if isKind(m[KEY_TYPE], reflect.String) { - if k, ok := m[KEY_TYPE].(string); ok { - err := currentSchema.types.Add(k) - if err != nil { - return err - } - } - } else { - if isKind(m[KEY_TYPE], reflect.Slice) { - arrayOfTypes := m[KEY_TYPE].([]interface{}) - for _, typeInArray := range arrayOfTypes { - if reflect.ValueOf(typeInArray).Kind() != reflect.String { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } else { - currentSchema.types.Add(typeInArray.(string)) - } - } - - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - } - } - - // properties - if existsMapKey(m, KEY_PROPERTIES) { - err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) - if err != nil { - return err - } - } - - // additionalProperties - if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { - if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { - currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) - } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalProperties = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_PROPERTIES, - }, - )) - } - } - - // patternProperties - if existsMapKey(m, KEY_PATTERN_PROPERTIES) { - if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { - patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) - if len(patternPropertiesMap) > 0 { - currentSchema.patternProperties = make(map[string]*subSchema) - for k, v := range patternPropertiesMap { - _, err := regexp.MatchString(k, "") - if err != nil { - return errors.New(formatErrorDescription( - Locale.RegexPattern(), - ErrorDetails{"pattern": k}, - )) - } - newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err = d.parseSchema(v, newSchema) - if err != nil { - return errors.New(err.Error()) - } - currentSchema.patternProperties[k] = newSchema - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // dependencies - if existsMapKey(m, KEY_DEPENDENCIES) { - err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) - if err != nil { - return err - } - } - - // items - if existsMapKey(m, KEY_ITEMS) { - if isKind(m[KEY_ITEMS], reflect.Slice) { - for _, itemElement := range m[KEY_ITEMS].([]interface{}) { - if isKind(itemElement, reflect.Map) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.AddItemsChild(newSchema) - err := d.parseSchema(itemElement, newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + 
STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - currentSchema.itemsChildrenIsSingleSchema = false - } - } else if isKind(m[KEY_ITEMS], reflect.Map) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.AddItemsChild(newSchema) - err := d.parseSchema(m[KEY_ITEMS], newSchema) - if err != nil { - return err - } - currentSchema.itemsChildrenIsSingleSchema = true - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - } - - // additionalItems - if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { - if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { - currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) - } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalItems = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_ITEMS, - }, - )) - } - } - - // validation : number / integer - - if existsMapKey(m, KEY_MULTIPLE_OF) { - multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) - if multipleOfValue == nil { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_NUMBER, - "given": KEY_MULTIPLE_OF, - }, - )) - } - if *multipleOfValue <= 0 { - return errors.New(formatErrorDescription( - Locale.GreaterThanZero(), - ErrorDetails{"number": KEY_MULTIPLE_OF}, - )) - } - currentSchema.multipleOf = multipleOfValue - } - - if existsMapKey(m, KEY_MINIMUM) { - minimumValue := mustBeNumber(m[KEY_MINIMUM]) - if minimumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.minimum = minimumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { - if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool) - currentSchema.exclusiveMinimum = exclusiveMinimumValue - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN}, - )) - } - } - - if existsMapKey(m, KEY_MAXIMUM) { - maximumValue := mustBeNumber(m[KEY_MAXIMUM]) - if maximumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.maximum = maximumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { - if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool) - currentSchema.exclusiveMaximum = exclusiveMaximumValue - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": STRING_NUMBER}, - )) - } - } - - if 
currentSchema.minimum != nil && currentSchema.maximum != nil { - if *currentSchema.minimum > *currentSchema.maximum { - return errors.New(formatErrorDescription( - Locale.CannotBeGT(), - ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM}, - )) - } - } - - // validation : string - - if existsMapKey(m, KEY_MIN_LENGTH) { - minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) - if minLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, - )) - } - if *minLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_LENGTH}, - )) - } - currentSchema.minLength = minLengthIntegerValue - } - - if existsMapKey(m, KEY_MAX_LENGTH) { - maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) - if maxLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, - )) - } - if *maxLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_LENGTH}, - )) - } - currentSchema.maxLength = maxLengthIntegerValue - } - - if currentSchema.minLength != nil && currentSchema.maxLength != nil { - if *currentSchema.minLength > *currentSchema.maxLength { - return errors.New(formatErrorDescription( - Locale.CannotBeGT(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, - )) - } - } - - if existsMapKey(m, KEY_MIN_NUMERIC) { - minNumericIntegerValue := mustBeInteger(m[KEY_MIN_NUMERIC]) - if minNumericIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_NUMERIC, "y": TYPE_INTEGER}, - )) - } - if *minNumericIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_NUMERIC}, - )) - } - currentSchema.minNumeric = minNumericIntegerValue - } - - if existsMapKey(m, KEY_MIN_SPECIAL) { - minSpecialIntegerValue := mustBeInteger(m[KEY_MIN_SPECIAL]) - if minSpecialIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_SPECIAL, "y": TYPE_INTEGER}, - )) - } - if *minSpecialIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_SPECIAL}, - )) - } - currentSchema.minSpecial = minSpecialIntegerValue - } - - if existsMapKey(m, KEY_MULTI_CASE) { - if isKind(m[KEY_MULTI_CASE], reflect.Bool) { - currentSchema.multiCase = m[KEY_MULTI_CASE].(bool) - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MULTI_CASE, "y": TYPE_BOOLEAN}, - )) - } - } - - if existsMapKey(m, KEY_DISABLE_SEQUENTIAL) { - if isKind(m[KEY_DISABLE_SEQUENTIAL], reflect.Bool) { - currentSchema.disableSequential = m[KEY_DISABLE_SEQUENTIAL].(bool) - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_DISABLE_SEQUENTIAL, "y": TYPE_BOOLEAN}, - )) - } - } - - if existsMapKey(m, KEY_PATTERN) { - if isKind(m[KEY_PATTERN], reflect.String) { - regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) - if err != nil { - return errors.New(formatErrorDescription( - Locale.MustBeValidRegex(), - ErrorDetails{"key": KEY_PATTERN}, - )) - } - currentSchema.pattern = regexpObject - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, - )) - } - } - - if 
existsMapKey(m, KEY_FORMAT) { - formatString, ok := m[KEY_FORMAT].(string) - if ok && FormatCheckers.Has(formatString) { - currentSchema.format = formatString - } else { - return errors.New(formatErrorDescription( - Locale.MustBeValidFormat(), - ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]}, - )) - } - } - - // validation : object - - if existsMapKey(m, KEY_MIN_PROPERTIES) { - minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) - if minPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *minPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_PROPERTIES}, - )) - } - currentSchema.minProperties = minPropertiesIntegerValue - } - - if existsMapKey(m, KEY_MAX_PROPERTIES) { - maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) - if maxPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *maxPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_PROPERTIES}, - )) - } - currentSchema.maxProperties = maxPropertiesIntegerValue - } - - if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { - if *currentSchema.minProperties > *currentSchema.maxProperties { - return errors.New(formatErrorDescription( - Locale.KeyCannotBeGreaterThan(), - ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, - )) - } - } - - if existsMapKey(m, KEY_REQUIRED) { - if isKind(m[KEY_REQUIRED], reflect.Slice) { - requiredValues := m[KEY_REQUIRED].([]interface{}) - for _, requiredValue := range requiredValues { - if isKind(requiredValue, reflect.String) { - err := currentSchema.AddRequired(requiredValue.(string)) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeOfType(), - ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, - )) - } - } - - // validation : array - - if existsMapKey(m, KEY_MIN_ITEMS) { - minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) - if minItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *minItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_ITEMS}, - )) - } - currentSchema.minItems = minItemsIntegerValue - } - - if existsMapKey(m, KEY_MAX_ITEMS) { - maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) - if maxItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *maxItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_ITEMS}, - )) - } - currentSchema.maxItems = maxItemsIntegerValue - } - - if existsMapKey(m, KEY_UNIQUE_ITEMS) { - if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { - currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, - )) - } - 
} - - // validation : all - - if existsMapKey(m, KEY_ENUM) { - if isKind(m[KEY_ENUM], reflect.Slice) { - for _, v := range m[KEY_ENUM].([]interface{}) { - err := currentSchema.AddEnum(v) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, - )) - } - } - - // validation : subSchema - - if existsMapKey(m, KEY_ONE_OF) { - if isKind(m[KEY_ONE_OF], reflect.Slice) { - for _, v := range m[KEY_ONE_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.AddOneOf(newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ANY_OF) { - if isKind(m[KEY_ANY_OF], reflect.Slice) { - for _, v := range m[KEY_ANY_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.AddAnyOf(newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ALL_OF) { - if isKind(m[KEY_ALL_OF], reflect.Slice) { - for _, v := range m[KEY_ALL_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.AddAllOf(newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_NOT) { - if isKind(m[KEY_NOT], reflect.Map) { - newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} - currentSchema.SetNot(newSchema) - err := d.parseSchema(m[KEY_NOT], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, - )) - } - } - - return nil -} - -func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error { - var refdDocumentNode interface{} - jsonPointer := currentSchema.ref.GetPointer() - standaloneDocument := d.pool.GetStandaloneDocument() - - if standaloneDocument != nil { - - var err error - refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument) - if err != nil { - return err - } - - } else { - dsp, err := d.pool.GetDocument(*currentSchema.ref) - if err != nil { - return err - } - - refdDocumentNode, _, err = jsonPointer.Get(dsp.Document) - if err != nil { - return err - } - - } - - if !isKind(refdDocumentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, - )) - } - - // returns the loaded referenced subSchema for the caller to update its current subSchema - newSchemaDocument := refdDocumentNode.(map[string]interface{}) - newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} - d.referencePool.Add(currentSchema.ref.String()+reference, newSchema) - - err := d.parseSchema(newSchemaDocument, newSchema) - if err != nil { - return err - } - - currentSchema.refSchema = newSchema - - return nil - -} - -func (d 
*Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - for k := range m { - schemaProperty := k - newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} - currentSchema.AddPropertiesChild(newSchema) - err := d.parseSchema(m[k], newSchema) - if err != nil { - return err - } - } - - return nil -} - -func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - currentSchema.dependencies = make(map[string]interface{}) - - for k := range m { - switch reflect.ValueOf(m[k]).Kind() { - - case reflect.Slice: - values := m[k].([]interface{}) - var valuesToRegister []string - - for _, value := range values { - if !isKind(value, reflect.String) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } else { - valuesToRegister = append(valuesToRegister, value.(string)) - } - currentSchema.dependencies[k] = valuesToRegister - } - - case reflect.Map: - depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err := d.parseSchema(m[k], depSchema) - if err != nil { - return err - } - currentSchema.dependencies[k] = depSchema - - default: - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - - } - - return nil -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schemaPool.go b/vendor/github.com/TykTechnologies/gojsonschema/schemaPool.go deleted file mode 100644 index f2ad641a..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/schemaPool.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines resources pooling. -// Eases referencing and avoids downloading the same resource twice. 
-// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - - "github.com/xeipuuv/gojsonreference" -) - -type schemaPoolDocument struct { - Document interface{} -} - -type schemaPool struct { - schemaPoolDocuments map[string]*schemaPoolDocument - standaloneDocument interface{} - jsonLoaderFactory JSONLoaderFactory -} - -func newSchemaPool(f JSONLoaderFactory) *schemaPool { - - p := &schemaPool{} - p.schemaPoolDocuments = make(map[string]*schemaPoolDocument) - p.standaloneDocument = nil - p.jsonLoaderFactory = f - - return p -} - -func (p *schemaPool) SetStandaloneDocument(document interface{}) { - p.standaloneDocument = document -} - -func (p *schemaPool) GetStandaloneDocument() (document interface{}) { - return p.standaloneDocument -} - -func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { - - if internalLogEnabled { - internalLog("Get Document ( %s )", reference.String()) - } - - var err error - - // It is not possible to load anything that is not canonical... - if !reference.IsCanonical() { - return nil, errors.New(formatErrorDescription( - Locale.ReferenceMustBeCanonical(), - ErrorDetails{"reference": reference}, - )) - } - - refToUrl := reference - refToUrl.GetUrl().Fragment = "" - - var spd *schemaPoolDocument - - // Try to find the requested document in the pool - for k := range p.schemaPoolDocuments { - if k == refToUrl.String() { - spd = p.schemaPoolDocuments[k] - } - } - - if spd != nil { - if internalLogEnabled { - internalLog(" From pool") - } - return spd, nil - } - - jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) - document, err := jsonReferenceLoader.LoadJSON() - if err != nil { - return nil, err - } - - spd = &schemaPoolDocument{Document: document} - // add the document to the pool for potential later use - p.schemaPoolDocuments[refToUrl.String()] = spd - - return spd, nil -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schemaReferencePool.go b/vendor/github.com/TykTechnologies/gojsonschema/schemaReferencePool.go deleted file mode 100644 index 294e36a7..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/schemaReferencePool.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Pool of referenced schemas. 
-// -// created 25-06-2013 - -package gojsonschema - -import ( - "fmt" -) - -type schemaReferencePool struct { - documents map[string]*subSchema -} - -func newSchemaReferencePool() *schemaReferencePool { - - p := &schemaReferencePool{} - p.documents = make(map[string]*subSchema) - - return p -} - -func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) - } - - if sch, ok := p.documents[ref]; ok { - if internalLogEnabled { - internalLog(fmt.Sprintf(" From pool")) - } - return sch, true - } - - return nil, false -} - -func (p *schemaReferencePool) Add(ref string, sch *subSchema) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) - } - - p.documents[ref] = sch -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/schemaType.go b/vendor/github.com/TykTechnologies/gojsonschema/schemaType.go deleted file mode 100644 index e13a0fb0..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/schemaType.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Helper structure to handle schema types, and the combination of them. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "strings" -) - -type jsonSchemaType struct { - types []string -} - -// Is the schema typed ? that is containing at least one type -// When not typed, the schema does not need any type validation -func (t *jsonSchemaType) IsTyped() bool { - return len(t.types) > 0 -} - -func (t *jsonSchemaType) Add(etype string) error { - - if !isStringInSlice(JSON_TYPES, etype) { - return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"type": etype})) - } - - if t.Contains(etype) { - return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) - } - - t.types = append(t.types, etype) - - return nil -} - -func (t *jsonSchemaType) Contains(etype string) bool { - - for _, v := range t.types { - if v == etype { - return true - } - } - - return false -} - -func (t *jsonSchemaType) String() string { - - if len(t.types) == 0 { - return STRING_UNDEFINED // should never happen - } - - // Displayed as a list [type1,type2,...] 
- if len(t.types) > 1 { - return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) - } - - // Only one type: name only - return t.types[0] -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/subSchema.go b/vendor/github.com/TykTechnologies/gojsonschema/subSchema.go deleted file mode 100644 index 75abc5f5..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/subSchema.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines the structure of a sub-subSchema. -// A sub-subSchema can contain other sub-schemas. -// -// created 27-02-2013 - -package gojsonschema - -import ( - "errors" - "regexp" - "strings" - - "github.com/xeipuuv/gojsonreference" -) - -const ( - KEY_SCHEMA = "$subSchema" - KEY_ID = "$id" - KEY_REF = "$ref" - KEY_TITLE = "title" - KEY_DESCRIPTION = "description" - KEY_TYPE = "type" - KEY_ITEMS = "items" - KEY_ADDITIONAL_ITEMS = "additionalItems" - KEY_PROPERTIES = "properties" - KEY_PATTERN_PROPERTIES = "patternProperties" - KEY_ADDITIONAL_PROPERTIES = "additionalProperties" - KEY_DEFINITIONS = "definitions" - KEY_MULTIPLE_OF = "multipleOf" - KEY_MINIMUM = "minimum" - KEY_MAXIMUM = "maximum" - KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" - KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" - KEY_MIN_LENGTH = "minLength" - KEY_MAX_LENGTH = "maxLength" - KEY_MIN_NUMERIC = "minNumeric" - KEY_MIN_SPECIAL = "minSpecial" - KEY_MULTI_CASE = "multiCase" - KEY_DISABLE_SEQUENTIAL = "disableSequential" - KEY_PATTERN = "pattern" - KEY_FORMAT = "format" - KEY_MIN_PROPERTIES = "minProperties" - KEY_MAX_PROPERTIES = "maxProperties" - KEY_DEPENDENCIES = "dependencies" - KEY_REQUIRED = "required" - KEY_MIN_ITEMS = "minItems" - KEY_MAX_ITEMS = "maxItems" - KEY_UNIQUE_ITEMS = "uniqueItems" - KEY_ENUM = "enum" - KEY_ONE_OF = "oneOf" - KEY_ANY_OF = "anyOf" - KEY_ALL_OF = "allOf" - KEY_NOT = "not" -) - -type subSchema struct { - - // basic subSchema meta properties - id *string - title *string - description *string - - property string - - // Types associated with the subSchema - types jsonSchemaType - - // Reference url - ref *gojsonreference.JsonReference - // Schema referenced - refSchema *subSchema - // Json reference - subSchema *gojsonreference.JsonReference - - // hierarchy - parent *subSchema - definitions map[string]*subSchema - definitionsChildren []*subSchema - itemsChildren []*subSchema - itemsChildrenIsSingleSchema bool - propertiesChildren []*subSchema - - // validation : number / integer - multipleOf *float64 - maximum *float64 - exclusiveMaximum bool - minimum *float64 - exclusiveMinimum bool - - // validation : string - minLength *int - maxLength *int - minNumeric *int - minSpecial *int - multiCase bool - disableSequential bool - 
pattern *regexp.Regexp - format string - - // validation : object - minProperties *int - maxProperties *int - required []string - - dependencies map[string]interface{} - additionalProperties interface{} - patternProperties map[string]*subSchema - - // validation : array - minItems *int - maxItems *int - uniqueItems bool - - additionalItems interface{} - - // validation : all - enum []string - - // validation : subSchema - oneOf []*subSchema - anyOf []*subSchema - allOf []*subSchema - not *subSchema -} - -func (s *subSchema) AddEnum(i interface{}) error { - - is, err := marshalToJsonString(i) - if err != nil { - return err - } - - if isStringInSlice(s.enum, *is) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_ENUM}, - )) - } - - s.enum = append(s.enum, *is) - - return nil -} - -func (s *subSchema) ContainsEnum(i interface{}) (bool, error) { - - is, err := marshalToJsonString(i) - if err != nil { - return false, err - } - - return isStringInSlice(s.enum, *is), nil -} - -func (s *subSchema) AddOneOf(subSchema *subSchema) { - s.oneOf = append(s.oneOf, subSchema) -} - -func (s *subSchema) AddAllOf(subSchema *subSchema) { - s.allOf = append(s.allOf, subSchema) -} - -func (s *subSchema) AddAnyOf(subSchema *subSchema) { - s.anyOf = append(s.anyOf, subSchema) -} - -func (s *subSchema) SetNot(subSchema *subSchema) { - s.not = subSchema -} - -func (s *subSchema) AddRequired(value string) error { - - if isStringInSlice(s.required, value) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_REQUIRED}, - )) - } - - s.required = append(s.required, value) - - return nil -} - -func (s *subSchema) AddDefinitionChild(child *subSchema) { - s.definitionsChildren = append(s.definitionsChildren, child) -} - -func (s *subSchema) AddItemsChild(child *subSchema) { - s.itemsChildren = append(s.itemsChildren, child) -} - -func (s *subSchema) AddPropertiesChild(child *subSchema) { - s.propertiesChildren = append(s.propertiesChildren, child) -} - -func (s *subSchema) PatternPropertiesString() string { - - if s.patternProperties == nil || len(s.patternProperties) == 0 { - return STRING_UNDEFINED // should never happen - } - - patternPropertiesKeySlice := []string{} - for pk := range s.patternProperties { - patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`) - } - - if len(patternPropertiesKeySlice) == 1 { - return patternPropertiesKeySlice[0] - } - - return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]" - -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/types.go b/vendor/github.com/TykTechnologies/gojsonschema/types.go deleted file mode 100644 index 952d22ef..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/types.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const types for schema and JSON. -// -// created 28-02-2013 - -package gojsonschema - -const ( - TYPE_ARRAY = `array` - TYPE_BOOLEAN = `boolean` - TYPE_INTEGER = `integer` - TYPE_NUMBER = `number` - TYPE_NULL = `null` - TYPE_OBJECT = `object` - TYPE_STRING = `string` -) - -var JSON_TYPES []string -var SCHEMA_TYPES []string - -func init() { - JSON_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_NULL, - TYPE_OBJECT, - TYPE_STRING} - - SCHEMA_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_OBJECT, - TYPE_STRING} -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/utils.go b/vendor/github.com/TykTechnologies/gojsonschema/utils.go deleted file mode 100644 index 26cf75eb..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/utils.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Various utility functions. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "fmt" - "math" - "reflect" - "strconv" -) - -func isKind(what interface{}, kind reflect.Kind) bool { - target := what - if isJsonNumber(what) { - // JSON Numbers are strings! 
- target = *mustBeNumber(what) - } - return reflect.ValueOf(target).Kind() == kind -} - -func existsMapKey(m map[string]interface{}, k string) bool { - _, ok := m[k] - return ok -} - -func isStringInSlice(s []string, what string) bool { - for i := range s { - if s[i] == what { - return true - } - } - return false -} - -func marshalToJsonString(value interface{}) (*string, error) { - - mBytes, err := json.Marshal(value) - if err != nil { - return nil, err - } - - sBytes := string(mBytes) - return &sBytes, nil -} - -func isJsonNumber(what interface{}) bool { - - switch what.(type) { - - case json.Number: - return true - } - - return false -} - -func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) { - - jsonNumber := what.(json.Number) - - f64, errFloat64 := jsonNumber.Float64() - s64 := strconv.FormatFloat(f64, 'f', -1, 64) - _, errInt64 := strconv.ParseInt(s64, 10, 64) - - isValidFloat64 = errFloat64 == nil - isValidInt64 = errInt64 == nil - - _, errInt32 := strconv.ParseInt(s64, 10, 32) - isValidInt32 = isValidInt64 && errInt32 == nil - - return - -} - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - max_json_float = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - min_json_float = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 -) - -func isFloat64AnInteger(f float64) bool { - - if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float { - return false - } - - return f == float64(int64(f)) || f == float64(uint64(f)) -} - -func mustBeInteger(what interface{}) *int { - - if isJsonNumber(what) { - - number := what.(json.Number) - - _, _, isValidInt32 := checkJsonNumber(number) - - if isValidInt32 { - - int64Value, err := number.Int64() - if err != nil { - return nil - } - - int32Value := int(int64Value) - return &int32Value - - } else { - return nil - } - - } - - return nil -} - -func mustBeNumber(what interface{}) *float64 { - - if isJsonNumber(what) { - - number := what.(json.Number) - float64Value, err := number.Float64() - - if err == nil { - return &float64Value - } else { - return nil - } - - } - - return nil - -} - -// formats a number so that it is displayed as the smallest string possible -func resultErrorFormatJsonNumber(n json.Number) string { - - if int64Value, err := n.Int64(); err == nil { - return fmt.Sprintf("%d", int64Value) - } - - float64Value, _ := n.Float64() - - return fmt.Sprintf("%g", float64Value) -} - -// formats a number so that it is displayed as the smallest string possible -func resultErrorFormatNumber(n float64) string { - - if isFloat64AnInteger(n) { - return fmt.Sprintf("%d", int64(n)) - } - - return fmt.Sprintf("%g", n) -} - -func convertDocumentNode(val interface{}) interface{} { - - if lval, ok := val.([]interface{}); ok { - - res := []interface{}{} - for _, v := range lval { - res = append(res, convertDocumentNode(v)) - } - - return res - - } - - if mval, ok := val.(map[interface{}]interface{}); ok { - - res := map[string]interface{}{} - - for k, v := range mval { - res[k.(string)] = convertDocumentNode(v) - } - - return res - - } - - return val -} diff --git a/vendor/github.com/TykTechnologies/gojsonschema/validation.go b/vendor/github.com/TykTechnologies/gojsonschema/validation.go deleted file mode 100644 index 597e0a1c..00000000 --- a/vendor/github.com/TykTechnologies/gojsonschema/validation.go +++ /dev/null @@ -1,901 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Extends Schema and subSchema, implements the validation phase. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "reflect" - "regexp" - "strconv" - "strings" - "unicode/utf8" -) - -func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { - - var err error - - // load schema - - schema, err := NewSchema(ls) - if err != nil { - return nil, err - } - - // begine validation - - return schema.Validate(ld) - -} - -func (v *Schema) Validate(l JSONLoader) (*Result, error) { - - // load document - - root, err := l.LoadJSON() - if err != nil { - return nil, err - } - - // begin validation - - result := &Result{} - context := newJsonContext(STRING_CONTEXT_ROOT, nil) - v.rootSchema.validateRecursive(v.rootSchema, root, result, context) - - return result, nil - -} - -func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result { - result := &Result{} - v.validateRecursive(v, document, result, context) - return result -} - -// Walker function to validate the json recursively against the subSchema -func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { - - if internalLogEnabled { - internalLog("validateRecursive %s", context.String()) - internalLog(" %v", currentNode) - } - - // Handle referenced schemas, returns directly when a $ref is found - if currentSubSchema.refSchema != nil { - v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) - return - } - - // Check for null value - if currentNode == nil { - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { - result.addError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_NULL, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) - v.validateCommon(currentSubSchema, currentNode, result, context) - - } else { // Not a null value - - if isJsonNumber(currentNode) { - - value := currentNode.(json.Number) - - _, isValidInt64, _ := checkJsonNumber(value) - - validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER)) - - if currentSubSchema.types.IsTyped() && !validType { - - givenType := TYPE_INTEGER - if !isValidInt64 { - givenType = TYPE_NUMBER - } - - result.addError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": givenType, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, 
context) - v.validateString(currentSubSchema, value, result, context) - - } else { - - rValue := reflect.ValueOf(currentNode) - rKind := rValue.Kind() - - switch rKind { - - // Slice => JSON array - - case reflect.Slice: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { - result.addError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_ARRAY, - }, - ) - return - } - - castCurrentNode := currentNode.([]interface{}) - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateArray(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - // Map => JSON object - - case reflect.Map: - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { - result.addError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_OBJECT, - }, - ) - return - } - - castCurrentNode, ok := currentNode.(map[string]interface{}) - if !ok { - castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) - } - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateObject(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - for _, pSchema := range currentSubSchema.propertiesChildren { - nextNode, ok := castCurrentNode[pSchema.property] - if ok { - subContext := newJsonContext(pSchema.property, context) - v.validateRecursive(pSchema, nextNode, result, subContext) - } - } - - // Simple JSON values : string, number, boolean - - case reflect.Bool: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { - result.addError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_BOOLEAN, - }, - ) - return - } - - value := currentNode.(bool) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - case reflect.String: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { - result.addError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_STRING, - }, - ) - return - } - - value := currentNode.(string) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - } - - } - - } - - result.incrementScore() -} - -// Different kinds of validation there, subSchema / common / array / object / string... 
-func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { - - if internalLogEnabled { - internalLog("validateSchema %s", context.String()) - internalLog(" %v", currentNode) - } - - if len(currentSubSchema.anyOf) > 0 { - - validatedAnyOf := false - var bestValidationResult *Result - - for _, anyOfSchema := range currentSubSchema.anyOf { - if !validatedAnyOf { - validationResult := anyOfSchema.subValidateWithContext(currentNode, context) - validatedAnyOf = validationResult.Valid() - - if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - } - if !validatedAnyOf { - - result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) - - if bestValidationResult != nil { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - } - - if len(currentSubSchema.oneOf) > 0 { - - nbValidated := 0 - var bestValidationResult *Result - - for _, oneOfSchema := range currentSubSchema.oneOf { - validationResult := oneOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - - if nbValidated != 1 { - - result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) - - if nbValidated == 0 { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - - } - - if len(currentSubSchema.allOf) > 0 { - nbValidated := 0 - - for _, allOfSchema := range currentSubSchema.allOf { - validationResult := allOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } - result.mergeErrors(validationResult) - } - - if nbValidated != len(currentSubSchema.allOf) { - result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.not != nil { - validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - result.addError(new(NumberNotError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { - if isKind(currentNode, reflect.Map) { - for elementKey := range currentNode.(map[string]interface{}) { - if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { - switch dependency := dependency.(type) { - - case []string: - for _, dependOnKey := range dependency { - if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { - result.addError( - new(MissingDependencyError), - context, - currentNode, - ErrorDetails{"dependency": dependOnKey}, - ) - } - } - - case *subSchema: - dependency.validateRecursive(dependency, currentNode, result, context) - - } - } - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { - - if internalLogEnabled { - internalLog("validateCommon %s", context.String()) - internalLog(" %v", value) - } - - // enum: - if len(currentSubSchema.enum) > 0 { - has, err := currentSubSchema.ContainsEnum(value) - 
if err != nil { - result.addError(new(InternalError), context, value, ErrorDetails{"error": err}) - } - if !has { - result.addError( - new(EnumError), - context, - value, - ErrorDetails{ - "allowed": strings.Join(currentSubSchema.enum, ", "), - }, - ) - } - } - - result.incrementScore() -} - -func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) { - - if internalLogEnabled { - internalLog("validateArray %s", context.String()) - internalLog(" %v", value) - } - - nbValues := len(value) - - // TODO explain - if currentSubSchema.itemsChildrenIsSingleSchema { - for i := range value { - subContext := newJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } else { - if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { - - nbItems := len(currentSubSchema.itemsChildren) - - // while we have both schemas and values, check them against each other - for i := 0; i != nbItems && i != nbValues; i++ { - subContext := newJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - - if nbItems < nbValues { - // we have less schemas than elements in the instance array, - // but that might be ok if "additionalItems" is specified. - - switch currentSubSchema.additionalItems.(type) { - case bool: - if !currentSubSchema.additionalItems.(bool) { - result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) - } - case *subSchema: - additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) - for i := nbItems; i != nbValues; i++ { - subContext := newJsonContext(strconv.Itoa(i), context) - validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } - } - } - } - - // minItems & maxItems - if currentSubSchema.minItems != nil { - if nbValues < int(*currentSubSchema.minItems) { - result.addError( - new(ArrayMinItemsError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minItems}, - ) - } - } - if currentSubSchema.maxItems != nil { - if nbValues > int(*currentSubSchema.maxItems) { - result.addError( - new(ArrayMaxItemsError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxItems}, - ) - } - } - - // uniqueItems: - if currentSubSchema.uniqueItems { - var stringifiedItems []string - for _, v := range value { - vString, err := marshalToJsonString(v) - if err != nil { - result.addError(new(InternalError), context, value, ErrorDetails{"err": err}) - } - if isStringInSlice(stringifiedItems, *vString) { - result.addError( - new(ItemsMustBeUniqueError), - context, - value, - ErrorDetails{"type": TYPE_ARRAY}, - ) - } - stringifiedItems = append(stringifiedItems, *vString) - } - } - - result.incrementScore() -} - -func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) { - - if internalLogEnabled { - internalLog("validateObject %s", context.String()) - internalLog(" %v", value) - } - - // minProperties & maxProperties: - if currentSubSchema.minProperties != nil { - if len(value) < int(*currentSubSchema.minProperties) { - result.addError( - new(ArrayMinPropertiesError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minProperties}, - ) - } - } - if 
currentSubSchema.maxProperties != nil { - if len(value) > int(*currentSubSchema.maxProperties) { - result.addError( - new(ArrayMaxPropertiesError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxProperties}, - ) - } - } - - // required: - for _, requiredProperty := range currentSubSchema.required { - _, ok := value[requiredProperty] - if ok { - result.incrementScore() - } else { - result.addError( - new(RequiredError), - context, - value, - ErrorDetails{"property": requiredProperty}, - ) - } - } - - // additionalProperty & patternProperty: - if currentSubSchema.additionalProperties != nil { - - switch currentSubSchema.additionalProperties.(type) { - case bool: - - if !currentSubSchema.additionalProperties.(bool) { - - for pk := range value { - - found := false - for _, spValue := range currentSubSchema.propertiesChildren { - if pk == spValue.property { - found = true - } - } - - pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) - - if found { - - if pp_has && !pp_match { - result.addError( - new(AdditionalPropertyNotAllowedError), - context, - value, - ErrorDetails{"property": pk}, - ) - } - - } else { - - if !pp_has || !pp_match { - result.addError( - new(AdditionalPropertyNotAllowedError), - context, - value, - ErrorDetails{"property": pk}, - ) - } - - } - } - } - - case *subSchema: - - additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema) - for pk := range value { - - found := false - for _, spValue := range currentSubSchema.propertiesChildren { - if pk == spValue.property { - found = true - } - } - - pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) - - if found { - - if pp_has && !pp_match { - validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) - result.mergeErrors(validationResult) - } - - } else { - - if !pp_has || !pp_match { - validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) - result.mergeErrors(validationResult) - } - - } - - } - } - } else { - - for pk := range value { - - pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) - - if pp_has && !pp_match { - - result.addError( - new(InvalidPropertyPatternError), - context, - value, - ErrorDetails{ - "property": pk, - "pattern": currentSubSchema.PatternPropertiesString(), - }, - ) - } - - } - } - - result.incrementScore() -} - -func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) { - - if internalLogEnabled { - internalLog("validatePatternProperty %s", context.String()) - internalLog(" %s %v", key, value) - } - - has = false - - validatedkey := false - - for pk, pv := range currentSubSchema.patternProperties { - if matches, _ := regexp.MatchString(pk, key); matches { - has = true - subContext := newJsonContext(key, context) - validationResult := pv.subValidateWithContext(value, subContext) - result.mergeErrors(validationResult) - if validationResult.Valid() { - validatedkey = true - } - } - } - - if !validatedkey { - return has, false - } - - result.incrementScore() - - return has, true -} - -func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { - - // Ignore JSON numbers - if isJsonNumber(value) { - return - } - - // Ignore non strings - if !isKind(value, reflect.String) { - return - } - - if 
internalLogEnabled { - internalLog("validateString %s", context.String()) - internalLog(" %v", value) - } - - stringValue := value.(string) - - // minLength & maxLength: - if currentSubSchema.minLength != nil { - if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { - result.addError( - new(StringLengthGTEError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minLength}, - ) - } - } - if currentSubSchema.maxLength != nil { - if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { - result.addError( - new(StringLengthLTEError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxLength}, - ) - } - } - - if currentSubSchema.minNumeric != nil { - re := regexp.MustCompile(`\pN`) - - if len(re.FindAllString(stringValue, -1)) < int(*currentSubSchema.minNumeric) { - result.addError( - new(StringNumericGTEError), - context, - value, - ErrorDetails{"min_numeric": *currentSubSchema.minNumeric}, - ) - } - } - - if currentSubSchema.minSpecial != nil { - re := regexp.MustCompile(`[^\pN\pL]`) - - if len(re.FindAllString(stringValue, -1)) < int(*currentSubSchema.minSpecial) { - result.addError( - new(StringSpecialGTEError), - context, - value, - ErrorDetails{"min_special": *currentSubSchema.minSpecial}, - ) - } - } - - if currentSubSchema.multiCase { - // See http://www.regular-expressions.info/unicode.html on unicode regexp docs - reL := regexp.MustCompile(`[\p{Ll}\pN]`) - reU := regexp.MustCompile(`\p{Lu}`) - - if len(reL.FindAllString(stringValue, -1)) == 0 || len(reU.FindAllString(stringValue, -1)) == 0 { - result.addError( - new(StringMultiCaseError), - context, - value, - ErrorDetails{"multi_case": currentSubSchema.multiCase}, - ) - } - } - - if currentSubSchema.disableSequential { - re := regexp.MustCompile("(?i)(abc|bcd|cde|def|efg|fgh|ghi|hij|ijk|jkl|klm|lmn|mno|nop|opq|pqr|qrs|rst|stu|tuv|uvw|vwx|wxy|xyz|012|123|234|345|456|567|678|789)") - - var seq []string - - for i := range stringValue { - if i < 2 { - continue - } - - if stringValue[i-2] == stringValue[i-1] && stringValue[i-1] == stringValue[i] { - seq = append(seq, stringValue[i-2:i]) - } - } - - m := re.FindAllString(stringValue, -1) - allM := append(seq, m...) 
- - if len(allM) > 0 { - result.addError( - new(StringSequentialError), - context, - value, - ErrorDetails{"sequential_chars": strings.Join(allM, ", ")}, - ) - } - } - - // pattern: - if currentSubSchema.pattern != nil { - if !currentSubSchema.pattern.MatchString(stringValue) { - result.addError( - new(DoesNotMatchPatternError), - context, - value, - ErrorDetails{"pattern": currentSubSchema.pattern}, - ) - - } - } - - // format - if currentSubSchema.format != "" { - if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { - result.addError( - new(DoesNotMatchFormatError), - context, - value, - ErrorDetails{"format": currentSubSchema.format}, - ) - } - } - - result.incrementScore() -} - -func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { - - // Ignore non numbers - if !isJsonNumber(value) { - return - } - - if internalLogEnabled { - internalLog("validateNumber %s", context.String()) - internalLog(" %v", value) - } - - number := value.(json.Number) - float64Value, _ := number.Float64() - - // multipleOf: - if currentSubSchema.multipleOf != nil { - - if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) { - result.addError( - new(MultipleOfError), - context, - resultErrorFormatJsonNumber(number), - ErrorDetails{"multiple": *currentSubSchema.multipleOf}, - ) - } - } - - //maximum & exclusiveMaximum: - if currentSubSchema.maximum != nil { - if currentSubSchema.exclusiveMaximum { - if float64Value >= *currentSubSchema.maximum { - result.addError( - new(NumberLTError), - context, - resultErrorFormatJsonNumber(number), - ErrorDetails{ - "max": resultErrorFormatNumber(*currentSubSchema.maximum), - }, - ) - } - } else { - if float64Value > *currentSubSchema.maximum { - result.addError( - new(NumberLTEError), - context, - resultErrorFormatJsonNumber(number), - ErrorDetails{ - "max": resultErrorFormatNumber(*currentSubSchema.maximum), - }, - ) - } - } - } - - //minimum & exclusiveMinimum: - if currentSubSchema.minimum != nil { - if currentSubSchema.exclusiveMinimum { - if float64Value <= *currentSubSchema.minimum { - result.addError( - new(NumberGTError), - context, - resultErrorFormatJsonNumber(number), - ErrorDetails{ - "min": resultErrorFormatNumber(*currentSubSchema.minimum), - }, - ) - } - } else { - if float64Value < *currentSubSchema.minimum { - result.addError( - new(NumberGTEError), - context, - resultErrorFormatJsonNumber(number), - ErrorDetails{ - "min": resultErrorFormatNumber(*currentSubSchema.minimum), - }, - ) - } - } - } - - result.incrementScore() -} diff --git a/vendor/github.com/TykTechnologies/goverify/README.md b/vendor/github.com/TykTechnologies/goverify/README.md deleted file mode 100644 index c1b3659d..00000000 --- a/vendor/github.com/TykTechnologies/goverify/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# RSA Verifier - -This lib makes it easy to verify a string with a signature using an RSA public/private key combination. 
- -Shamelessly ripped off from SO: - -http://stackoverflow.com/questions/20655702/signing-and-decoding-with-rsa-sha-in-go - -And this play example: - -https://play.golang.org/p/bzpD7Pa9mr \ No newline at end of file diff --git a/vendor/github.com/TykTechnologies/goverify/goverify.go b/vendor/github.com/TykTechnologies/goverify/goverify.go deleted file mode 100644 index ae9372c3..00000000 --- a/vendor/github.com/TykTechnologies/goverify/goverify.go +++ /dev/null @@ -1,3 +0,0 @@ -package goverify - - diff --git a/vendor/github.com/TykTechnologies/goverify/rsa_signer.go b/vendor/github.com/TykTechnologies/goverify/rsa_signer.go deleted file mode 100644 index af98c67a..00000000 --- a/vendor/github.com/TykTechnologies/goverify/rsa_signer.go +++ /dev/null @@ -1,20 +0,0 @@ -package goverify - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" -) - -type RSAPrivateKey struct { - *rsa.PrivateKey -} - -// Sign signs data with rsa-sha256 -func (r *RSAPrivateKey) Sign(data []byte) ([]byte, error) { - h := sha256.New() - h.Write(data) - d := h.Sum(nil) - return rsa.SignPKCS1v15(rand.Reader, r.PrivateKey, crypto.SHA256, d) -} diff --git a/vendor/github.com/TykTechnologies/goverify/rsa_verifier.go b/vendor/github.com/TykTechnologies/goverify/rsa_verifier.go deleted file mode 100644 index 7c033f10..00000000 --- a/vendor/github.com/TykTechnologies/goverify/rsa_verifier.go +++ /dev/null @@ -1,19 +0,0 @@ -package goverify - -import ( - "crypto" - "crypto/rsa" - "crypto/sha256" -) - -type RSAPublicKey struct { - *rsa.PublicKey -} - -// Unsign verifies the message using a rsa-sha256 signature -func (r *RSAPublicKey) Verify(message []byte, sig []byte) error { - h := sha256.New() - h.Write(message) - d := h.Sum(nil) - return rsa.VerifyPKCS1v15(r.PublicKey, crypto.SHA256, d, sig) -} diff --git a/vendor/github.com/TykTechnologies/goverify/signer.go b/vendor/github.com/TykTechnologies/goverify/signer.go deleted file mode 100644 index 940b88f1..00000000 --- a/vendor/github.com/TykTechnologies/goverify/signer.go +++ /dev/null @@ -1,24 +0,0 @@ -package goverify - -import ( - "crypto/rsa" - "fmt" -) - -// A Signer is can create signatures that verify against a public key. -type Signer interface { - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(data []byte) ([]byte, error) -} - -func newSignerFromKey(k interface{}) (Signer, error) { - var sshKey Signer - switch t := k.(type) { - case *rsa.PrivateKey: - sshKey = &RSAPrivateKey{t} - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", k) - } - return sshKey, nil -} diff --git a/vendor/github.com/TykTechnologies/goverify/util.go b/vendor/github.com/TykTechnologies/goverify/util.go deleted file mode 100644 index 864a6ff4..00000000 --- a/vendor/github.com/TykTechnologies/goverify/util.go +++ /dev/null @@ -1,83 +0,0 @@ -package goverify - -import ( - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" -) - -// loadPrivateKey loads an parses a PEM encoded private key file. -func LoadPublicKeyFromFile(path string) (Verifier, error) { - dat, err := ioutil.ReadFile(path) - - if err != nil { - return nil, err - } - - return parsePublicKey(dat) -} - -func LoadPublicKeyFromString(key string) (Verifier, error) { - - return parsePublicKey([]byte(key)) -} - -// parsePublicKey parses a PEM encoded private key. 
-func parsePublicKey(pemBytes []byte) (Verifier, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - var rawkey interface{} - switch block.Type { - case "PUBLIC KEY": - rsa, err := x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return nil, err - } - rawkey = rsa - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } - - return newVerifierFromKey(rawkey) -} - -// loadPrivateKey loads an parses a PEM encoded private key file. -func LoadPrivateKeyFromFile(path string) (Signer, error) { - dat, err := ioutil.ReadFile(path) - - if err != nil { - return nil, err - } - - return parsePrivateKey(dat) -} - -func LoadPrivateKeyFromString(key string) (Signer, error) { - return parsePrivateKey([]byte(key)) -} - -// parsePublicKey parses a PEM encoded private key. -func parsePrivateKey(pemBytes []byte) (Signer, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - var rawkey interface{} - switch block.Type { - case "RSA PRIVATE KEY": - rsa, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, err - } - rawkey = rsa - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } - return newSignerFromKey(rawkey) -} diff --git a/vendor/github.com/TykTechnologies/goverify/verifier.go b/vendor/github.com/TykTechnologies/goverify/verifier.go deleted file mode 100644 index 409766ec..00000000 --- a/vendor/github.com/TykTechnologies/goverify/verifier.go +++ /dev/null @@ -1,24 +0,0 @@ -package goverify - -import ( - "crypto/rsa" - "fmt" -) - -// A Verifier is can validate signatures that verify against a public key. -type Verifier interface { - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. 
- Verify(data []byte, sig []byte) error -} - -func newVerifierFromKey(k interface{}) (Verifier, error) { - var sshKey Verifier - switch t := k.(type) { - case *rsa.PublicKey: - sshKey = &RSAPublicKey{t} - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", k) - } - return sshKey, nil -} diff --git a/vendor/github.com/TykTechnologies/serverless/provider/aws/provider.go b/vendor/github.com/TykTechnologies/serverless/provider/aws/provider.go deleted file mode 100644 index 52e41e77..00000000 --- a/vendor/github.com/TykTechnologies/serverless/provider/aws/provider.go +++ /dev/null @@ -1,112 +0,0 @@ -package aws - -import ( - "fmt" - "github.com/TykTechnologies/serverless/provider" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/external" - "github.com/aws/aws-sdk-go-v2/service/lambda" - "github.com/pkg/errors" - "reflect" -) - -type AWSConf aws.Config - -func init() { - fmt.Println("registering AWS lambda") - provider.RegisterProvider("aws-lambda", NewProvider) -} - -func NewProvider() (provider.Provider, error) { - return &Provider{}, nil -} - -type Provider struct { - aws.Config -} - -func (p *Provider) Init(conf provider.Conf) error { - - c, ok := conf.(*AWSConf) - if !ok { - return fmt.Errorf("unable to resolve conf type: %v", reflect.TypeOf(conf)) - } - - p.Region = c.Region - - awsCfg, err := external.LoadDefaultAWSConfig() - if err != nil { - return errors.Wrap(err, provider.ErrorLoadingDriverConfig) - } - - awsCfg.Region = c.Region - p.Config = awsCfg - - fmt.Println(awsCfg.Region) - - return nil -} - -func (p Provider) List() ([]provider.Function, error) { - - service := lambda.New(p.Config) - - listFunctionsRequest := service.ListFunctionsRequest(nil) - listFunctionsOutput, err := listFunctionsRequest.Send() - - if err != nil { - return nil, errors.Wrap(err, provider.ErrorListingFunctions) - } - - //lfoJs, _ := json.Marshal(listFunctionsOutput.Functions) - //logrus.Debug(string(lfoJs)) - - functions := make([]provider.Function, 0) - for _, f := range listFunctionsOutput.Functions { - - myFunc := provider.Function{ - Name: aws.StringValue(f.FunctionName), - Version: aws.StringValue(f.Version), - } - - functions = append(functions, myFunc) - } - - return functions, nil -} - -func (p Provider) Invoke(function provider.Function, requestBody []byte) (*provider.Response, error) { - - service := lambda.New(p.Config) - - if function.GetVersion() == "" { - function.SetVersion("$LATEST") - } - - input := lambda.InvokeInput{ - //ClientContext: aws.String("index"), // need to investigate context - FunctionName: aws.String(function.GetName()), - //InvocationType: lambda.InvocationTypeEvent, // To make async - LogType: lambda.LogTypeTail, - Payload: requestBody, - Qualifier: aws.String(function.GetVersion()), - } - - request := service.InvokeRequest(&input) - - lambdaRes, err := request.Send() - if err != nil { - return nil, errors.Wrap(err, provider.ErrorInvokingFunction) - } - - res := provider.Response{ - StatusCode: int(aws.Int64Value(lambdaRes.StatusCode)), - Body: lambdaRes.Payload, - } - - if lambdaRes.FunctionError != nil { - res.Error = errors.New(aws.StringValue(lambdaRes.FunctionError)) - } - - return &res, nil -} diff --git a/vendor/github.com/TykTechnologies/serverless/provider/provider.go b/vendor/github.com/TykTechnologies/serverless/provider/provider.go deleted file mode 100644 index 0346ffbd..00000000 --- a/vendor/github.com/TykTechnologies/serverless/provider/provider.go +++ /dev/null @@ -1,73 +0,0 @@ -package provider - -import "errors" - 
-const ( - ErrorLoadingDriverConfig = "unable to load driver config" - ErrorListingFunctions = "unable to list functions" - ErrorInvokingFunction = "unable to invoke function" -) - -type Conf interface{} - -type Function struct { - Name string - Version string -} - -func (f Function) GetName() string { - return f.Name -} - -func (f Function) GetVersion() string { - return f.Version -} - -func (f *Function) SetVersion(v string) { - f.Version = v -} - -type Response struct { - Body []byte - StatusCode int - Error error -} - -func (r Response) GetBody() []byte { - return r.Body -} - -type Initializer interface { - Init(Conf) error -} - -type Lister interface { - List() ([]Function, error) -} - -type Invoker interface { - Invoke(detail Function, body []byte) (*Response, error) -} - -type Provider interface { - Lister - Invoker - Initializer -} - -type NewProviderFunc func() (Provider, error) - -var providerRegister = map[string]NewProviderFunc{} - -func RegisterProvider(name string, newFunc NewProviderFunc) { - providerRegister[name] = newFunc -} - -func GetProvider(name string) (Provider, error) { - nf, ok := providerRegister[name] - if !ok { - return nil, errors.New("not found") - } - - return nf() -} diff --git a/vendor/github.com/TykTechnologies/tyk/LICENSE.md b/vendor/github.com/TykTechnologies/tyk/LICENSE.md deleted file mode 100644 index 771dfbcd..00000000 --- a/vendor/github.com/TykTechnologies/tyk/LICENSE.md +++ /dev/null @@ -1,184 +0,0 @@ -# Mozilla Public License Version 2.0 - -## 1. Definitions - -### 1.1. “Contributor” -means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. - -### 1.2. “Contributor Version” -means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. - -### 1.3. “Contribution” -means Covered Software of a particular Contributor. - -### 1.4. “Covered Software” -means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. - -### 1.5. “Incompatible With Secondary Licenses” -means - -* a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or -* b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. - -### 1.6. “Executable Form” -means any form of the work other than Source Code Form. - -### 1.7. “Larger Work” -means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. - -### 1.8. “License” -means this document. - -### 1.9. “Licensable” -means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. - -### 1.10. “Modifications” -means any of the following: - -* a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or -* b. any new file in Source Code Form that contains any Covered Software. - -### 1.11. 
“Patent Claims” of a Contributor -means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. - -### 1.12. “Secondary License” -means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. - -### 1.13. “Source Code Form” -means the form of the work preferred for making modifications. - -### 1.14. “You” (or “Your”) -means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -## 2. License Grants and Conditions - -### 2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - -* a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and -* b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. - -### 2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. - -### 2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: - -* a. for any code that a Contributor has removed from Covered Software; or -* b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or -* c. under Patent Claims infringed by Covered Software in the absence of its Contributions. - -This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). - -### 2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). - -### 2.5. Representation - -Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. - -### 2.6. 
Fair Use - -This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. - -### 2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. - - -## 3. Responsibilities - -### 3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. - -### 3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -* a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - -* b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. - -### 3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). - -### 3.4. Notices - -You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. - -### 3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. - - -## 4. 
Inability to Comply Due to Statute or Regulation - -If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. - - -## 5. Termination - -5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. - - -## 6. Disclaimer of Warranty - -**Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.** - - -## 7. Limitation of Liability - -**Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. 
This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.** - - -## 8. Litigation - -Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. - - -## 9. Miscellaneous - -This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. - - -## 10. Versions of the License - -### 10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. - -### 10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. - -### 10.3. Modified Versions - -If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). - -### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - -If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. - - -## Exhibit A - Source Code Form License Notice - -> This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - - -## Exhibit B - “Incompatible With Secondary Licenses” Notice - -> This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. 
\ No newline at end of file diff --git a/vendor/github.com/TykTechnologies/tyk/apidef/api_definitions.go b/vendor/github.com/TykTechnologies/tyk/apidef/api_definitions.go deleted file mode 100644 index 71bb68be..00000000 --- a/vendor/github.com/TykTechnologies/tyk/apidef/api_definitions.go +++ /dev/null @@ -1,833 +0,0 @@ -package apidef - -import ( - "encoding/base64" - "encoding/json" - "encoding/xml" - "text/template" - - "github.com/clbanning/mxj" - - "github.com/lonelycode/osin" - "gopkg.in/mgo.v2/bson" - - "time" - - "github.com/TykTechnologies/gojsonschema" - "github.com/TykTechnologies/tyk/regexp" -) - -type AuthProviderCode string -type SessionProviderCode string -type StorageEngineCode string -type TykEvent string // A type so we can ENUM event types easily, e.g. EventQuotaExceeded -type TykEventHandlerName string // A type for handler codes in API definitions - -type EndpointMethodAction string -type TemplateMode string - -type MiddlewareDriver string -type IdExtractorSource string -type IdExtractorType string -type AuthTypeEnum string -type RoutingTriggerOnType string - -const ( - NoAction EndpointMethodAction = "no_action" - Reply EndpointMethodAction = "reply" - - UseBlob TemplateMode = "blob" - UseFile TemplateMode = "file" - - RequestXML RequestInputType = "xml" - RequestJSON RequestInputType = "json" - - OttoDriver MiddlewareDriver = "otto" - PythonDriver MiddlewareDriver = "python" - LuaDriver MiddlewareDriver = "lua" - GrpcDriver MiddlewareDriver = "grpc" - GoPluginDriver MiddlewareDriver = "goplugin" - - BodySource IdExtractorSource = "body" - HeaderSource IdExtractorSource = "header" - QuerystringSource IdExtractorSource = "querystring" - FormSource IdExtractorSource = "form" - - ValueExtractor IdExtractorType = "value" - XPathExtractor IdExtractorType = "xpath" - RegexExtractor IdExtractorType = "regex" - - // For multi-type auth - AuthToken AuthTypeEnum = "auth_token" - HMACKey AuthTypeEnum = "hmac_key" - BasicAuthUser AuthTypeEnum = "basic_auth_user" - JWTClaim AuthTypeEnum = "jwt_claim" - OIDCUser AuthTypeEnum = "oidc_user" - OAuthKey AuthTypeEnum = "oauth_key" - UnsetAuth AuthTypeEnum = "" - - // For routing triggers - All RoutingTriggerOnType = "all" - Any RoutingTriggerOnType = "any" - Ignore RoutingTriggerOnType = "" -) - -type EndpointMethodMeta struct { - Action EndpointMethodAction `bson:"action" json:"action"` - Code int `bson:"code" json:"code"` - Data string `bson:"data" json:"data"` - Headers map[string]string `bson:"headers" json:"headers"` -} - -type EndPointMeta struct { - Path string `bson:"path" json:"path"` - IgnoreCase bool `bson:"ignore_case" json:"ignore_case"` - MethodActions map[string]EndpointMethodMeta `bson:"method_actions" json:"method_actions"` -} - -type CacheMeta struct { - Method string `bson:"method" json:"method"` - Path string `bson:"path" json:"path"` - CacheKeyRegex string `bson:"cache_key_regex" json:"cache_key_regex"` -} - -type RequestInputType string - -type TemplateData struct { - Input RequestInputType `bson:"input_type" json:"input_type"` - Mode TemplateMode `bson:"template_mode" json:"template_mode"` - EnableSession bool `bson:"enable_session" json:"enable_session"` - TemplateSource string `bson:"template_source" json:"template_source"` -} - -type TemplateMeta struct { - TemplateData TemplateData `bson:"template_data" json:"template_data"` - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` -} - -type TransformJQMeta struct { - Filter string `bson:"filter" json:"filter"` - Path string 
`bson:"path" json:"path"` - Method string `bson:"method" json:"method"` -} - -type HeaderInjectionMeta struct { - DeleteHeaders []string `bson:"delete_headers" json:"delete_headers"` - AddHeaders map[string]string `bson:"add_headers" json:"add_headers"` - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - ActOnResponse bool `bson:"act_on" json:"act_on"` -} - -type HardTimeoutMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - TimeOut int `bson:"timeout" json:"timeout"` -} - -type TrackEndpointMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` -} - -type InternalMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` -} - -type RequestSizeMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - SizeLimit int64 `bson:"size_limit" json:"size_limit"` -} - -type CircuitBreakerMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - ThresholdPercent float64 `bson:"threshold_percent" json:"threshold_percent"` - Samples int64 `bson:"samples" json:"samples"` - ReturnToServiceAfter int `bson:"return_to_service_after" json:"return_to_service_after"` -} - -type StringRegexMap struct { - MatchPattern string `bson:"match_rx" json:"match_rx"` - Reverse bool `bson:"reverse" json:"reverse"` - matchRegex *regexp.Regexp -} - -type RoutingTriggerOptions struct { - HeaderMatches map[string]StringRegexMap `bson:"header_matches" json:"header_matches"` - QueryValMatches map[string]StringRegexMap `bson:"query_val_matches" json:"query_val_matches"` - PathPartMatches map[string]StringRegexMap `bson:"path_part_matches" json:"path_part_matches"` - SessionMetaMatches map[string]StringRegexMap `bson:"session_meta_matches" json:"session_meta_matches"` - RequestContextMatches map[string]StringRegexMap `bson:"request_context_matches" json:"request_context_matches"` - PayloadMatches StringRegexMap `bson:"payload_matches" json:"payload_matches"` -} - -type RoutingTrigger struct { - On RoutingTriggerOnType `bson:"on" json:"on"` - Options RoutingTriggerOptions `bson:"options" json:"options"` - RewriteTo string `bson:"rewrite_to" json:"rewrite_to"` -} - -type URLRewriteMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - MatchPattern string `bson:"match_pattern" json:"match_pattern"` - RewriteTo string `bson:"rewrite_to" json:"rewrite_to"` - Triggers []RoutingTrigger `bson:"triggers" json:"triggers"` - MatchRegexp *regexp.Regexp `json:"-"` -} - -type VirtualMeta struct { - ResponseFunctionName string `bson:"response_function_name" json:"response_function_name"` - FunctionSourceType string `bson:"function_source_type" json:"function_source_type"` - FunctionSourceURI string `bson:"function_source_uri" json:"function_source_uri"` - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - UseSession bool `bson:"use_session" json:"use_session"` - ProxyOnError bool `bson:"proxy_on_error" json:"proxy_on_error"` -} - -type MethodTransformMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - ToMethod string `bson:"to_method" json:"to_method"` -} - -type ValidatePathMeta struct { - Path string `bson:"path" json:"path"` - Method string `bson:"method" json:"method"` - Schema map[string]interface{} `bson:"schema" json:"schema"` - SchemaB64 string 
`bson:"schema_b64" json:"schema_b64,omitempty"` - SchemaCache gojsonschema.JSONLoader `bson:"-" json:"-"` - // Allows override of default 422 Unprocessible Entity response code for validation errors. - ErrorResponseCode int `bson:"error_response_code" json:"error_response_code"` -} - -type ExtendedPathsSet struct { - Ignored []EndPointMeta `bson:"ignored" json:"ignored,omitempty"` - WhiteList []EndPointMeta `bson:"white_list" json:"white_list,omitempty"` - BlackList []EndPointMeta `bson:"black_list" json:"black_list,omitempty"` - Cached []string `bson:"cache" json:"cache,omitempty"` - AdvanceCacheConfig []CacheMeta `bson:"advance_cache_config" json:"advance_cache_config,omitempty"` - Transform []TemplateMeta `bson:"transform" json:"transform,omitempty"` - TransformResponse []TemplateMeta `bson:"transform_response" json:"transform_response,omitempty"` - TransformJQ []TransformJQMeta `bson:"transform_jq" json:"transform_jq,omitempty"` - TransformJQResponse []TransformJQMeta `bson:"transform_jq_response" json:"transform_jq_response,omitempty"` - TransformHeader []HeaderInjectionMeta `bson:"transform_headers" json:"transform_headers,omitempty"` - TransformResponseHeader []HeaderInjectionMeta `bson:"transform_response_headers" json:"transform_response_headers,omitempty"` - HardTimeouts []HardTimeoutMeta `bson:"hard_timeouts" json:"hard_timeouts,omitempty"` - CircuitBreaker []CircuitBreakerMeta `bson:"circuit_breakers" json:"circuit_breakers,omitempty"` - URLRewrite []URLRewriteMeta `bson:"url_rewrites" json:"url_rewrites,omitempty"` - Virtual []VirtualMeta `bson:"virtual" json:"virtual,omitempty"` - SizeLimit []RequestSizeMeta `bson:"size_limits" json:"size_limits,omitempty"` - MethodTransforms []MethodTransformMeta `bson:"method_transforms" json:"method_transforms,omitempty"` - TrackEndpoints []TrackEndpointMeta `bson:"track_endpoints" json:"track_endpoints,omitempty"` - DoNotTrackEndpoints []TrackEndpointMeta `bson:"do_not_track_endpoints" json:"do_not_track_endpoints,omitempty"` - ValidateJSON []ValidatePathMeta `bson:"validate_json" json:"validate_json,omitempty"` - Internal []InternalMeta `bson:"internal" json:"internal,omitempty"` -} - -type VersionInfo struct { - Name string `bson:"name" json:"name"` - Expires string `bson:"expires" json:"expires"` - ExpiresTs time.Time `bson:"-" json:"-"` - Paths struct { - Ignored []string `bson:"ignored" json:"ignored"` - WhiteList []string `bson:"white_list" json:"white_list"` - BlackList []string `bson:"black_list" json:"black_list"` - } `bson:"paths" json:"paths"` - UseExtendedPaths bool `bson:"use_extended_paths" json:"use_extended_paths"` - ExtendedPaths ExtendedPathsSet `bson:"extended_paths" json:"extended_paths"` - GlobalHeaders map[string]string `bson:"global_headers" json:"global_headers"` - GlobalHeadersRemove []string `bson:"global_headers_remove" json:"global_headers_remove"` - GlobalSizeLimit int64 `bson:"global_size_limit" json:"global_size_limit"` - OverrideTarget string `bson:"override_target" json:"override_target"` -} - -type AuthProviderMeta struct { - Name AuthProviderCode `bson:"name" json:"name"` - StorageEngine StorageEngineCode `bson:"storage_engine" json:"storage_engine"` - Meta map[string]interface{} `bson:"meta" json:"meta"` -} - -type SessionProviderMeta struct { - Name SessionProviderCode `bson:"name" json:"name"` - StorageEngine StorageEngineCode `bson:"storage_engine" json:"storage_engine"` - Meta map[string]interface{} `bson:"meta" json:"meta"` -} - -type EventHandlerTriggerConfig struct { - Handler TykEventHandlerName 
`bson:"handler_name" json:"handler_name"` - HandlerMeta map[string]interface{} `bson:"handler_meta" json:"handler_meta"` -} - -type EventHandlerMetaConfig struct { - Events map[TykEvent][]EventHandlerTriggerConfig `bson:"events" json:"events"` -} - -type MiddlewareDefinition struct { - Name string `bson:"name" json:"name"` - Path string `bson:"path" json:"path"` - RequireSession bool `bson:"require_session" json:"require_session"` - RawBodyOnly bool `bson:"raw_body_only" json:"raw_body_only"` -} - -type MiddlewareIdExtractor struct { - ExtractFrom IdExtractorSource `bson:"extract_from" json:"extract_from"` - ExtractWith IdExtractorType `bson:"extract_with" json:"extract_with"` - ExtractorConfig map[string]interface{} `bson:"extractor_config" json:"extractor_config"` - Extractor interface{} `bson:"-" json:"-"` -} - -type MiddlewareSection struct { - Pre []MiddlewareDefinition `bson:"pre" json:"pre"` - Post []MiddlewareDefinition `bson:"post" json:"post"` - PostKeyAuth []MiddlewareDefinition `bson:"post_key_auth" json:"post_key_auth"` - AuthCheck MiddlewareDefinition `bson:"auth_check" json:"auth_check"` - Response []MiddlewareDefinition `bson:"response" json:"response"` - Driver MiddlewareDriver `bson:"driver" json:"driver"` - IdExtractor MiddlewareIdExtractor `bson:"id_extractor" json:"id_extractor"` -} - -type CacheOptions struct { - CacheTimeout int64 `bson:"cache_timeout" json:"cache_timeout"` - EnableCache bool `bson:"enable_cache" json:"enable_cache"` - CacheAllSafeRequests bool `bson:"cache_all_safe_requests" json:"cache_all_safe_requests"` - CacheOnlyResponseCodes []int `bson:"cache_response_codes" json:"cache_response_codes"` - EnableUpstreamCacheControl bool `bson:"enable_upstream_cache_control" json:"enable_upstream_cache_control"` - CacheControlTTLHeader string `bson:"cache_control_ttl_header" json:"cache_control_ttl_header"` -} - -type ResponseProcessor struct { - Name string `bson:"name" json:"name"` - Options interface{} `bson:"options" json:"options"` -} - -type HostCheckObject struct { - CheckURL string `bson:"url" json:"url"` - Protocol string `bson:"protocol" json:"protocol"` - Timeout time.Duration `bson:"timeout" json:"timeout"` - EnableProxyProtocol bool `bson:"enable_proxy_protocol" json:"enable_proxy_protocol"` - Commands []CheckCommand `bson:"commands" json:"commands"` - Method string `bson:"method" json:"method"` - Headers map[string]string `bson:"headers" json:"headers"` - Body string `bson:"body" json:"body"` -} - -type CheckCommand struct { - Name string `bson:"name" json:"name"` - Message string `bson:"message" json:"message"` -} - -type ServiceDiscoveryConfiguration struct { - UseDiscoveryService bool `bson:"use_discovery_service" json:"use_discovery_service"` - QueryEndpoint string `bson:"query_endpoint" json:"query_endpoint"` - UseNestedQuery bool `bson:"use_nested_query" json:"use_nested_query"` - ParentDataPath string `bson:"parent_data_path" json:"parent_data_path"` - DataPath string `bson:"data_path" json:"data_path"` - PortDataPath string `bson:"port_data_path" json:"port_data_path"` - TargetPath string `bson:"target_path" json:"target_path"` - UseTargetList bool `bson:"use_target_list" json:"use_target_list"` - CacheTimeout int64 `bson:"cache_timeout" json:"cache_timeout"` - EndpointReturnsList bool `bson:"endpoint_returns_list" json:"endpoint_returns_list"` -} - -type OIDProviderConfig struct { - Issuer string `bson:"issuer" json:"issuer"` - ClientIDs map[string]string `bson:"client_ids" json:"client_ids"` -} - -type OpenIDOptions struct { - Providers 
[]OIDProviderConfig `bson:"providers" json:"providers"` - SegregateByClient bool `bson:"segregate_by_client" json:"segregate_by_client"` -} - -// APIDefinition represents the configuration for a single proxied API and it's versions. -// -// swagger:model -type APIDefinition struct { - Id bson.ObjectId `bson:"_id,omitempty" json:"id,omitempty"` - Name string `bson:"name" json:"name"` - Slug string `bson:"slug" json:"slug"` - ListenPort int `bson:"listen_port" json:"listen_port"` - Protocol string `bson:"protocol" json:"protocol"` - EnableProxyProtocol bool `bson:"enable_proxy_protocol" json:"enable_proxy_protocol"` - APIID string `bson:"api_id" json:"api_id"` - OrgID string `bson:"org_id" json:"org_id"` - UseKeylessAccess bool `bson:"use_keyless" json:"use_keyless"` - UseOauth2 bool `bson:"use_oauth2" json:"use_oauth2"` - UseOpenID bool `bson:"use_openid" json:"use_openid"` - OpenIDOptions OpenIDOptions `bson:"openid_options" json:"openid_options"` - Oauth2Meta struct { - AllowedAccessTypes []osin.AccessRequestType `bson:"allowed_access_types" json:"allowed_access_types"` - AllowedAuthorizeTypes []osin.AuthorizeRequestType `bson:"allowed_authorize_types" json:"allowed_authorize_types"` - AuthorizeLoginRedirect string `bson:"auth_login_redirect" json:"auth_login_redirect"` - } `bson:"oauth_meta" json:"oauth_meta"` - Auth AuthConfig `bson:"auth" json:"auth"` // Deprecated: Use AuthConfigs instead. - AuthConfigs map[string]AuthConfig `bson:"auth_configs" json:"auth_configs"` - UseBasicAuth bool `bson:"use_basic_auth" json:"use_basic_auth"` - BasicAuth struct { - DisableCaching bool `bson:"disable_caching" json:"disable_caching"` - CacheTTL int `bson:"cache_ttl" json:"cache_ttl"` - ExtractFromBody bool `bson:"extract_from_body" json:"extract_from_body"` - BodyUserRegexp string `bson:"body_user_regexp" json:"body_user_regexp"` - BodyPasswordRegexp string `bson:"body_password_regexp" json:"body_password_regexp"` - } `bson:"basic_auth" json:"basic_auth"` - UseMutualTLSAuth bool `bson:"use_mutual_tls_auth" json:"use_mutual_tls_auth"` - ClientCertificates []string `bson:"client_certificates" json:"client_certificates"` - UpstreamCertificates map[string]string `bson:"upstream_certificates" json:"upstream_certificates"` - PinnedPublicKeys map[string]string `bson:"pinned_public_keys" json:"pinned_public_keys"` - EnableJWT bool `bson:"enable_jwt" json:"enable_jwt"` - UseStandardAuth bool `bson:"use_standard_auth" json:"use_standard_auth"` - UseGoPluginAuth bool `bson:"use_go_plugin_auth" json:"use_go_plugin_auth"` - EnableCoProcessAuth bool `bson:"enable_coprocess_auth" json:"enable_coprocess_auth"` - JWTSigningMethod string `bson:"jwt_signing_method" json:"jwt_signing_method"` - JWTSource string `bson:"jwt_source" json:"jwt_source"` - JWTIdentityBaseField string `bson:"jwt_identit_base_field" json:"jwt_identity_base_field"` - JWTClientIDBaseField string `bson:"jwt_client_base_field" json:"jwt_client_base_field"` - JWTPolicyFieldName string `bson:"jwt_policy_field_name" json:"jwt_policy_field_name"` - JWTDefaultPolicies []string `bson:"jwt_default_policies" json:"jwt_default_policies"` - JWTIssuedAtValidationSkew uint64 `bson:"jwt_issued_at_validation_skew" json:"jwt_issued_at_validation_skew"` - JWTExpiresAtValidationSkew uint64 `bson:"jwt_expires_at_validation_skew" json:"jwt_expires_at_validation_skew"` - JWTNotBeforeValidationSkew uint64 `bson:"jwt_not_before_validation_skew" json:"jwt_not_before_validation_skew"` - JWTSkipKid bool `bson:"jwt_skip_kid" json:"jwt_skip_kid"` - JWTScopeToPolicyMapping 
map[string]string `bson:"jwt_scope_to_policy_mapping" json:"jwt_scope_to_policy_mapping"` - JWTScopeClaimName string `bson:"jwt_scope_claim_name" json:"jwt_scope_claim_name"` - NotificationsDetails NotificationsManager `bson:"notifications" json:"notifications"` - EnableSignatureChecking bool `bson:"enable_signature_checking" json:"enable_signature_checking"` - HmacAllowedClockSkew float64 `bson:"hmac_allowed_clock_skew" json:"hmac_allowed_clock_skew"` - HmacAllowedAlgorithms []string `bson:"hmac_allowed_algorithms" json:"hmac_allowed_algorithms"` - RequestSigning RequestSigningMeta `bson:"request_signing" json:"request_signing"` - BaseIdentityProvidedBy AuthTypeEnum `bson:"base_identity_provided_by" json:"base_identity_provided_by"` - VersionDefinition struct { - Location string `bson:"location" json:"location"` - Key string `bson:"key" json:"key"` - StripPath bool `bson:"strip_path" json:"strip_path"` - } `bson:"definition" json:"definition"` - VersionData struct { - NotVersioned bool `bson:"not_versioned" json:"not_versioned"` - DefaultVersion string `bson:"default_version" json:"default_version"` - Versions map[string]VersionInfo `bson:"versions" json:"versions"` - } `bson:"version_data" json:"version_data"` - UptimeTests struct { - CheckList []HostCheckObject `bson:"check_list" json:"check_list"` - Config struct { - ExpireUptimeAnalyticsAfter int64 `bson:"expire_utime_after" json:"expire_utime_after"` // must have an expireAt TTL index set (http://docs.mongodb.org/manual/tutorial/expire-data/) - ServiceDiscovery ServiceDiscoveryConfiguration `bson:"service_discovery" json:"service_discovery"` - RecheckWait int `bson:"recheck_wait" json:"recheck_wait"` - } `bson:"config" json:"config"` - } `bson:"uptime_tests" json:"uptime_tests"` - Proxy struct { - PreserveHostHeader bool `bson:"preserve_host_header" json:"preserve_host_header"` - ListenPath string `bson:"listen_path" json:"listen_path"` - TargetURL string `bson:"target_url" json:"target_url"` - DisableStripSlash bool `bson:"disable_strip_slash" json:"disable_strip_slash"` - StripListenPath bool `bson:"strip_listen_path" json:"strip_listen_path"` - EnableLoadBalancing bool `bson:"enable_load_balancing" json:"enable_load_balancing"` - Targets []string `bson:"target_list" json:"target_list"` - StructuredTargetList *HostList `bson:"-" json:"-"` - CheckHostAgainstUptimeTests bool `bson:"check_host_against_uptime_tests" json:"check_host_against_uptime_tests"` - ServiceDiscovery ServiceDiscoveryConfiguration `bson:"service_discovery" json:"service_discovery"` - Transport struct { - SSLInsecureSkipVerify bool `bson:"ssl_insecure_skip_verify" json:"ssl_insecure_skip_verify"` - SSLCipherSuites []string `bson:"ssl_ciphers" json:"ssl_ciphers"` - SSLMinVersion uint16 `bson:"ssl_min_version" json:"ssl_min_version"` - SSLForceCommonNameCheck bool `json:"ssl_force_common_name_check"` - ProxyURL string `bson:"proxy_url" json:"proxy_url"` - } `bson:"transport" json:"transport"` - } `bson:"proxy" json:"proxy"` - DisableRateLimit bool `bson:"disable_rate_limit" json:"disable_rate_limit"` - DisableQuota bool `bson:"disable_quota" json:"disable_quota"` - CustomMiddleware MiddlewareSection `bson:"custom_middleware" json:"custom_middleware"` - CustomMiddlewareBundle string `bson:"custom_middleware_bundle" json:"custom_middleware_bundle"` - CacheOptions CacheOptions `bson:"cache_options" json:"cache_options"` - SessionLifetime int64 `bson:"session_lifetime" json:"session_lifetime"` - Active bool `bson:"active" json:"active"` - Internal bool `bson:"internal" 
json:"internal"` - AuthProvider AuthProviderMeta `bson:"auth_provider" json:"auth_provider"` - SessionProvider SessionProviderMeta `bson:"session_provider" json:"session_provider"` - EventHandlers EventHandlerMetaConfig `bson:"event_handlers" json:"event_handlers"` - EnableBatchRequestSupport bool `bson:"enable_batch_request_support" json:"enable_batch_request_support"` - EnableIpWhiteListing bool `mapstructure:"enable_ip_whitelisting" bson:"enable_ip_whitelisting" json:"enable_ip_whitelisting"` - AllowedIPs []string `mapstructure:"allowed_ips" bson:"allowed_ips" json:"allowed_ips"` - EnableIpBlacklisting bool `mapstructure:"enable_ip_blacklisting" bson:"enable_ip_blacklisting" json:"enable_ip_blacklisting"` - BlacklistedIPs []string `mapstructure:"blacklisted_ips" bson:"blacklisted_ips" json:"blacklisted_ips"` - DontSetQuotasOnCreate bool `mapstructure:"dont_set_quota_on_create" bson:"dont_set_quota_on_create" json:"dont_set_quota_on_create"` - ExpireAnalyticsAfter int64 `mapstructure:"expire_analytics_after" bson:"expire_analytics_after" json:"expire_analytics_after"` // must have an expireAt TTL index set (http://docs.mongodb.org/manual/tutorial/expire-data/) - ResponseProcessors []ResponseProcessor `bson:"response_processors" json:"response_processors"` - CORS struct { - Enable bool `bson:"enable" json:"enable"` - AllowedOrigins []string `bson:"allowed_origins" json:"allowed_origins"` - AllowedMethods []string `bson:"allowed_methods" json:"allowed_methods"` - AllowedHeaders []string `bson:"allowed_headers" json:"allowed_headers"` - ExposedHeaders []string `bson:"exposed_headers" json:"exposed_headers"` - AllowCredentials bool `bson:"allow_credentials" json:"allow_credentials"` - MaxAge int `bson:"max_age" json:"max_age"` - OptionsPassthrough bool `bson:"options_passthrough" json:"options_passthrough"` - Debug bool `bson:"debug" json:"debug"` - } `bson:"CORS" json:"CORS"` - Domain string `bson:"domain" json:"domain"` - Certificates []string `bson:"certificates" json:"certificates"` - DoNotTrack bool `bson:"do_not_track" json:"do_not_track"` - Tags []string `bson:"tags" json:"tags"` - EnableContextVars bool `bson:"enable_context_vars" json:"enable_context_vars"` - ConfigData map[string]interface{} `bson:"config_data" json:"config_data"` - TagHeaders []string `bson:"tag_headers" json:"tag_headers"` - GlobalRateLimit GlobalRateLimit `bson:"global_rate_limit" json:"global_rate_limit"` - StripAuthData bool `bson:"strip_auth_data" json:"strip_auth_data"` - EnableDetailedRecording bool `bson:"enable_detailed_recording" json:"enable_detailed_recording"` -} - -type AuthConfig struct { - UseParam bool `mapstructure:"use_param" bson:"use_param" json:"use_param"` - ParamName string `mapstructure:"param_name" bson:"param_name" json:"param_name"` - UseCookie bool `mapstructure:"use_cookie" bson:"use_cookie" json:"use_cookie"` - CookieName string `mapstructure:"cookie_name" bson:"cookie_name" json:"cookie_name"` - AuthHeaderName string `mapstructure:"auth_header_name" bson:"auth_header_name" json:"auth_header_name"` - UseCertificate bool `mapstructure:"use_certificate" bson:"use_certificate" json:"use_certificate"` - ValidateSignature bool `mapstructure:"validate_signature" bson:"validate_signature" json:"validate_signature"` - Signature SignatureConfig `mapstructure:"signature" bson:"signature" json:"signature,omitempty"` -} - -type SignatureConfig struct { - Algorithm string `mapstructure:"algorithm" bson:"algorithm" json:"algorithm"` - Header string `mapstructure:"header" bson:"header" 
json:"header"` - Secret string `mapstructure:"secret" bson:"secret" json:"secret"` - AllowedClockSkew int64 `mapstructure:"allowed_clock_skew" bson:"allowed_clock_skew" json:"allowed_clock_skew"` - ErrorCode int `mapstructure:"error_code" bson:"error_code" json:"error_code"` - ErrorMessage string `mapstructure:"error_message" bson:"error_message" json:"error_message"` -} - -type GlobalRateLimit struct { - Rate float64 `bson:"rate" json:"rate"` - Per float64 `bson:"per" json:"per"` -} - -type BundleManifest struct { - FileList []string `bson:"file_list" json:"file_list"` - CustomMiddleware MiddlewareSection `bson:"custom_middleware" json:"custom_middleware"` - Checksum string `bson:"checksum" json:"checksum"` - Signature string `bson:"signature" json:"signature"` -} - -type RequestSigningMeta struct { - IsEnabled bool `bson:"is_enabled" json:"is_enabled"` - Secret string `bson:"secret" json:"secret"` - KeyId string `bson:"key_id" json:"key_id"` - Algorithm string `bson:"algorithm" json:"algorithm"` - HeaderList []string `bson:"header_list" json:"header_list"` - CertificateId string `bson:"certificate_id" json:"certificate_id"` - SignatureHeader string `bson:"signature_header" json:"signature_header"` -} - -// Clean will URL encode map[string]struct variables for saving -func (a *APIDefinition) EncodeForDB() { - newVersion := make(map[string]VersionInfo) - for k, v := range a.VersionData.Versions { - newK := base64.StdEncoding.EncodeToString([]byte(k)) - v.Name = newK - newVersion[newK] = v - } - a.VersionData.Versions = newVersion - - newUpstreamCerts := make(map[string]string) - for domain, cert := range a.UpstreamCertificates { - newD := base64.StdEncoding.EncodeToString([]byte(domain)) - newUpstreamCerts[newD] = cert - } - a.UpstreamCertificates = newUpstreamCerts - - newPinnedPublicKeys := make(map[string]string) - for domain, cert := range a.PinnedPublicKeys { - newD := base64.StdEncoding.EncodeToString([]byte(domain)) - newPinnedPublicKeys[newD] = cert - } - a.PinnedPublicKeys = newPinnedPublicKeys - - for i, version := range a.VersionData.Versions { - for j, oldSchema := range version.ExtendedPaths.ValidateJSON { - - jsBytes, _ := json.Marshal(oldSchema.Schema) - oldSchema.SchemaB64 = base64.StdEncoding.EncodeToString(jsBytes) - oldSchema.Schema = nil - - a.VersionData.Versions[i].ExtendedPaths.ValidateJSON[j] = oldSchema - } - } - - // Auth is deprecated so this code tries to maintain backward compatibility - if a.Auth.AuthHeaderName == "" { - a.Auth = a.AuthConfigs["authToken"] - } -} - -func (a *APIDefinition) DecodeFromDB() { - newVersion := make(map[string]VersionInfo) - for k, v := range a.VersionData.Versions { - newK, err := base64.StdEncoding.DecodeString(k) - if err != nil { - log.Error("Couldn't Decode, leaving as it may be legacy...") - newVersion[k] = v - } else { - v.Name = string(newK) - newVersion[string(newK)] = v - } - } - a.VersionData.Versions = newVersion - - newUpstreamCerts := make(map[string]string) - for domain, cert := range a.UpstreamCertificates { - newD, err := base64.StdEncoding.DecodeString(domain) - if err != nil { - log.Error("Couldn't Decode, leaving as it may be legacy...") - newUpstreamCerts[domain] = cert - } else { - newUpstreamCerts[string(newD)] = cert - } - } - a.UpstreamCertificates = newUpstreamCerts - - newPinnedPublicKeys := make(map[string]string) - for domain, cert := range a.PinnedPublicKeys { - newD, err := base64.StdEncoding.DecodeString(domain) - if err != nil { - log.Error("Couldn't Decode, leaving as it may be legacy...") - 
newPinnedPublicKeys[domain] = cert - } else { - newPinnedPublicKeys[string(newD)] = cert - } - } - a.PinnedPublicKeys = newPinnedPublicKeys - - for i, version := range a.VersionData.Versions { - for j, oldSchema := range version.ExtendedPaths.ValidateJSON { - jsBytes, _ := base64.StdEncoding.DecodeString(oldSchema.SchemaB64) - - json.Unmarshal(jsBytes, &oldSchema.Schema) - oldSchema.SchemaB64 = "" - - a.VersionData.Versions[i].ExtendedPaths.ValidateJSON[j] = oldSchema - } - } - - // Auth is deprecated so this code tries to maintain backward compatibility - makeCompatible := func(authType string) { - if a.AuthConfigs == nil { - a.AuthConfigs = make(map[string]AuthConfig) - } - - _, ok := a.AuthConfigs[authType] - - if !ok { - a.AuthConfigs[authType] = a.Auth - } - } - - makeCompatible("authToken") - makeCompatible("jwt") -} - -func (s *StringRegexMap) Check(value string) (match string) { - if s.matchRegex == nil { - return - } - - return s.matchRegex.FindString(value) -} - -func (s *StringRegexMap) FindStringSubmatch(value string) (matched bool, match []string) { - if s.matchRegex == nil { - return - } - - match = s.matchRegex.FindStringSubmatch(value) - if !s.Reverse { - matched = len(match) > 0 - } else { - matched = len(match) == 0 - } - - return -} - -func (s *StringRegexMap) FindAllStringSubmatch(value string, n int) (matched bool, matches [][]string) { - matches = s.matchRegex.FindAllStringSubmatch(value, n) - if !s.Reverse { - matched = len(matches) > 0 - } else { - matched = len(matches) == 0 - } - - return -} - -func (s *StringRegexMap) Init() error { - var err error - if s.matchRegex, err = regexp.Compile(s.MatchPattern); err != nil { - log.WithError(err).WithField("MatchPattern", s.MatchPattern). - Error("Could not compile matchRegex for StringRegexMap") - return err - } - - return nil -} - -func DummyAPI() APIDefinition { - endpointMeta := EndPointMeta{ - Path: "abc", - MethodActions: map[string]EndpointMethodMeta{ - "GET": { - Action: Reply, - Code: 200, - Data: "testdata", - Headers: map[string]string{"header": "value"}, - }, - }, - } - templateMeta := TemplateMeta{ - TemplateData: TemplateData{Input: RequestJSON, Mode: UseBlob}, - } - transformJQMeta := TransformJQMeta{ - Filter: "filter", - Path: "path", - Method: "method", - } - headerInjectionMeta := HeaderInjectionMeta{ - DeleteHeaders: []string{"header1", "header2"}, - AddHeaders: map[string]string{}, - Path: "path", - Method: "method", - } - hardTimeoutMeta := HardTimeoutMeta{Path: "path", Method: "method", TimeOut: 0} - circuitBreakerMeta := CircuitBreakerMeta{ - Path: "path", - Method: "method", - ThresholdPercent: 0.0, - Samples: 0, - ReturnToServiceAfter: 0, - } - // TODO: Extend triggers - urlRewriteMeta := URLRewriteMeta{ - Path: "", - Method: "method", - MatchPattern: "matchpattern", - RewriteTo: "rewriteto", - Triggers: []RoutingTrigger{}, - } - virtualMeta := VirtualMeta{ - ResponseFunctionName: "responsefunctioname", - FunctionSourceType: "functionsourcetype", - FunctionSourceURI: "functionsourceuri", - Path: "path", - Method: "method", - } - sizeLimit := RequestSizeMeta{ - Path: "path", - Method: "method", - SizeLimit: 0, - } - methodTransformMeta := MethodTransformMeta{Path: "path", Method: "method", ToMethod: "tomethod"} - trackEndpointMeta := TrackEndpointMeta{Path: "path", Method: "method"} - internalMeta := InternalMeta{Path: "path", Method: "method"} - validatePathMeta := ValidatePathMeta{Path: "path", Method: "method", Schema: map[string]interface{}{}, SchemaB64: ""} - paths := struct { - Ignored 
[]string `bson:"ignored" json:"ignored"` - WhiteList []string `bson:"white_list" json:"white_list"` - BlackList []string `bson:"black_list" json:"black_list"` - }{ - Ignored: []string{}, - WhiteList: []string{}, - BlackList: []string{}, - } - versionInfo := VersionInfo{ - Name: "Default", - UseExtendedPaths: true, - Paths: paths, - ExtendedPaths: ExtendedPathsSet{ - Ignored: []EndPointMeta{endpointMeta}, - WhiteList: []EndPointMeta{endpointMeta}, - BlackList: []EndPointMeta{endpointMeta}, - Cached: []string{}, - Transform: []TemplateMeta{templateMeta}, - TransformResponse: []TemplateMeta{templateMeta}, - TransformJQ: []TransformJQMeta{transformJQMeta}, - TransformJQResponse: []TransformJQMeta{transformJQMeta}, - TransformHeader: []HeaderInjectionMeta{headerInjectionMeta}, - TransformResponseHeader: []HeaderInjectionMeta{headerInjectionMeta}, - HardTimeouts: []HardTimeoutMeta{hardTimeoutMeta}, - CircuitBreaker: []CircuitBreakerMeta{circuitBreakerMeta}, - URLRewrite: []URLRewriteMeta{urlRewriteMeta}, - Virtual: []VirtualMeta{virtualMeta}, - SizeLimit: []RequestSizeMeta{sizeLimit}, - MethodTransforms: []MethodTransformMeta{methodTransformMeta}, - TrackEndpoints: []TrackEndpointMeta{trackEndpointMeta}, - DoNotTrackEndpoints: []TrackEndpointMeta{trackEndpointMeta}, - Internal: []InternalMeta{internalMeta}, - ValidateJSON: []ValidatePathMeta{validatePathMeta}, - }, - } - versionData := struct { - NotVersioned bool `bson:"not_versioned" json:"not_versioned"` - DefaultVersion string `bson:"default_version" json:"default_version"` - Versions map[string]VersionInfo `bson:"versions" json:"versions"` - }{ - NotVersioned: true, - DefaultVersion: "", - Versions: map[string]VersionInfo{ - "Default": versionInfo, - }, - } - - return APIDefinition{ - VersionData: versionData, - ConfigData: map[string]interface{}{}, - AllowedIPs: []string{}, - PinnedPublicKeys: map[string]string{}, - ResponseProcessors: []ResponseProcessor{}, - ClientCertificates: []string{}, - BlacklistedIPs: []string{}, - TagHeaders: []string{}, - UpstreamCertificates: map[string]string{}, - JWTScopeToPolicyMapping: map[string]string{}, - HmacAllowedAlgorithms: []string{}, - CustomMiddleware: MiddlewareSection{ - Post: []MiddlewareDefinition{}, - Pre: []MiddlewareDefinition{}, - PostKeyAuth: []MiddlewareDefinition{}, - AuthCheck: MiddlewareDefinition{}, - IdExtractor: MiddlewareIdExtractor{ - ExtractorConfig: map[string]interface{}{}, - }, - }, - Tags: []string{}, - } -} - -var Template = template.New("").Funcs(map[string]interface{}{ - "jsonMarshal": func(v interface{}) (string, error) { - bs, err := json.Marshal(v) - return string(bs), err - }, - "xmlMarshal": func(v interface{}) (string, error) { - var err error - var xmlValue []byte - mv, ok := v.(mxj.Map) - if ok { - mxj.XMLEscapeChars(true) - xmlValue, err = mv.Xml() - } else { - res, ok := v.(map[string]interface{}) - if ok { - mxj.XMLEscapeChars(true) - xmlValue, err = mxj.Map(res).Xml() - } else { - xmlValue, err = xml.MarshalIndent(v, "", " ") - } - } - - return string(xmlValue), err - }, -}) diff --git a/vendor/github.com/TykTechnologies/tyk/apidef/host_list.go b/vendor/github.com/TykTechnologies/tyk/apidef/host_list.go deleted file mode 100644 index 2b0bcf84..00000000 --- a/vendor/github.com/TykTechnologies/tyk/apidef/host_list.go +++ /dev/null @@ -1,54 +0,0 @@ -package apidef - -import ( - "errors" - "sync" -) - -type HostList struct { - hMutex sync.RWMutex - hosts []string -} - -func NewHostList() *HostList { - hl := HostList{} - hl.hosts = make([]string, 0) - return &hl -} 
- -func NewHostListFromList(newList []string) *HostList { - hl := NewHostList() - hl.Set(newList) - return hl -} - -func (h *HostList) Set(newList []string) { - h.hMutex.Lock() - defer h.hMutex.Unlock() - - h.hosts = newList -} - -func (h *HostList) All() []string { - return h.hosts -} - -func (h *HostList) GetIndex(i int) (string, error) { - if i < 0 { - return "", errors.New("index must be positive int") - } - h.hMutex.RLock() - defer h.hMutex.RUnlock() - - if i > len(h.hosts)-1 { - return "", errors.New("index out of range") - } - - return h.hosts[i], nil -} - -func (h *HostList) Len() int { - h.hMutex.RLock() - defer h.hMutex.RUnlock() - return len(h.hosts) -} diff --git a/vendor/github.com/TykTechnologies/tyk/apidef/notifications.go b/vendor/github.com/TykTechnologies/tyk/apidef/notifications.go deleted file mode 100644 index 6315b69b..00000000 --- a/vendor/github.com/TykTechnologies/tyk/apidef/notifications.go +++ /dev/null @@ -1,58 +0,0 @@ -package apidef - -import ( - "time" - - logger "github.com/TykTechnologies/tyk/log" - - "github.com/franela/goreq" -) - -var log = logger.Get() - -// NotificationsManager handles sending notifications to OAuth endpoints to notify the provider of key changes. -// TODO: Make this more generic -type NotificationsManager struct { - SharedSecret string `bson:"shared_secret" json:"shared_secret"` - OAuthKeyChangeURL string `bson:"oauth_on_keychange_url" json:"oauth_on_keychange_url"` -} - -// SendRequest sends the requested package (as a POST) to the defined -func (n NotificationsManager) SendRequest(wait bool, count int, notification interface{}) { - if n.OAuthKeyChangeURL == "" { - return - } - - if wait { - if count < 3 { - time.Sleep(10 * time.Second) - } else { - log.Error("Too many notification attempts, aborting.") - return - } - } - - req := goreq.Request{ - Method: "POST", - Uri: n.OAuthKeyChangeURL, - UserAgent: "Tyk-Gatewy-Notifications", - ContentType: "application/json", - Body: notification, - } - - req.AddHeader("X-Tyk-Shared-Secret", n.SharedSecret) - - resp, err := req.Do() - if err != nil { - log.Error("Request failed, trying again in 10s. 
Error was: ", err) - count++ - go n.SendRequest(true, count, notification) - return - } - if resp.StatusCode != 200 { - log.Error("Request returned non-200 status, trying again in 10s.") - count++ - go n.SendRequest(true, count, notification) - return - } -} diff --git a/vendor/github.com/TykTechnologies/tyk/apidef/rpc.go b/vendor/github.com/TykTechnologies/tyk/apidef/rpc.go deleted file mode 100644 index 8a6a6788..00000000 --- a/vendor/github.com/TykTechnologies/tyk/apidef/rpc.go +++ /dev/null @@ -1,30 +0,0 @@ -package apidef - -type InboundData struct { - KeyName string - Value string - SessionState string - Timeout int64 - Per int64 - Expire int64 -} - -type DefRequest struct { - OrgId string - Tags []string -} - -type GroupLoginRequest struct { - UserKey string - GroupID string -} - -type GroupKeySpaceRequest struct { - OrgID string - GroupID string -} - -type KeysValuesPair struct { - Keys []string - Values []string -} diff --git a/vendor/github.com/TykTechnologies/tyk/apidef/schema.go b/vendor/github.com/TykTechnologies/tyk/apidef/schema.go deleted file mode 100644 index 1a92d000..00000000 --- a/vendor/github.com/TykTechnologies/tyk/apidef/schema.go +++ /dev/null @@ -1,440 +0,0 @@ -package apidef - -const Schema = `{ - "type": ["object", "null"], - "$schema": "http://json-schema.org/draft-04/schema", - "id": "http://jsonschema.net", - "additionalProperties": false, - "properties": { - "is_site": { - "type": "boolean" - }, - "uptime_tests": { - "type": ["object", "null"] - }, - "expire_analytics_after": { - "type": "number" - }, - "id": { - "type": "string" - }, - "org_id": { - "type": "string" - }, - "api_id": { - "type": "string" - }, - "enable_ip_whitelisting": { - "type": "boolean" - }, - "enable_ip_blacklisting": { - "type": "boolean" - }, - "enable_context_vars": { - "type": "boolean" - }, - "strip_auth_data": { - "type": "boolean" - }, - "do_not_track": { - "type": "boolean" - }, - "enable_jwt": { - "type": "boolean" - }, - "use_openid": { - "type": "boolean" - }, - "openid_options": { - "type": ["object", "null"] - }, - "use_standard_auth": { - "type": "boolean" - }, - "use_go_plugin_auth": { - "type": "boolean" - }, - "enable_coprocess_auth": { - "type": "boolean" - }, - "jwt_skip_kid": { - "type": "boolean" - }, - "base_identity_provided_by": { - "type": "string" - }, - "disable_rate_limit": { - "type": "boolean" - }, - "disable_quota": { - "type": "boolean" - }, - "custom_middleware_bundle": { - "type": "string" - }, - "jwt_policy_field_name": { - "type": "string" - }, - "jwt_default_policies": { - "type": ["array", "null"] - }, - "jwt_signing_method": { - "type": "string" - }, - "jwt_source": { - "type": "string" - }, - "jwt_identity_base_field": { - "type": "string" - }, - "jwt_client_base_field": { - "type": "string" - }, - "jwt_disable_issued_at_validation": { - "type": "boolean" - }, - "jwt_disable_expires_at_validation": { - "type": "boolean" - }, - "jwt_disable_not_before_validation": { - "type": "boolean" - }, - "jwt_issued_at_validation_skew": { - "type": "number" - }, - "jwt_expires_at_validation_skew": { - "type": "number" - }, - "jwt_not_before_validation_skew": { - "type": "number" - }, - "jwt_scope_to_policy_mapping": { - "type": ["object", "null"] - }, - "jwt_scope_claim_name": { - "type": "string" - }, - "use_keyless": { - "type": "boolean" - }, - "use_basic_auth": { - "type": "boolean" - }, - "use_mutual_tls_auth": { - "type": "boolean" - }, - "client_certificates": { - "type": ["array", "null"] - }, - "upstream_certificates": { - "type": ["object", "null"] 
- }, - "pinned_public_keys": { - "type": ["object", "null"] - }, - "allowed_ips": { - "type": ["array", "null"] - }, - "blacklisted_ips": { - "type": ["array", "null"] - }, - "enable_batch_request_support": { - "type": "boolean" - }, - "event_handlers": { - "type":["object", "null"] - }, - "notifications": { - "type":["object", "null"] - }, - "use_oauth2": { - "type": "boolean" - }, - "oauth_meta": { - "type":["object", "null"] - }, - "cache_options": { - "type":["object", "null"] - }, - "tags": { - "type": ["array", "null"] - }, - "tag_headers": { - "type": ["array", "null"] - }, - "basic_auth": { - "type": ["object", "null"] - }, - "CORS": { - "type":["object", "null"] - }, - "response_processors": { - "type": ["array", "null"] - }, - "auth_provider": { - "type":["object", "null"], - "properties": { - "name": { - "type": "string", - "enum": [""] - }, - "storage_engine": { - "type": "string", - "enum": [""] - } - } - }, - "session_provider": { - "type":["object", "null"], - "properties": { - "name": { - "type": "string", - "enum": [""] - }, - "storage_engine": { - "type": "string", - "enum": [""] - } - } - }, - "hmac_allowed_clock_skew": { - "type": "number" - }, - "hmac_allowed_algorithms": { - "type": ["array", "null"] - }, - "dont_set_quota_on_create": { - "type": "boolean" - }, - "custom_middleware": { - "type":["object", "null"], - "properties": { - "pre": { - "type": ["array", "null"] - }, - "post": { - "type": ["array", "null"] - } - } - }, - "session_lifetime": { - "type": "number" - }, - "enable_detailed_recording": { - "type": "boolean" - }, - "enable_signature_checking": { - "type": "boolean" - }, - "active": { - "type": "boolean" - }, - "internal": { - "type": "boolean" - }, - "auth": { - "type": ["object", "null"], - "id": "http://jsonschema.net/auth", - "properties": { - "auth_header_name": { - "type": "string", - "id": "http://jsonschema.net/auth/auth_header_name" - }, - "use_certificate": { - "type": "boolean" - } - } - }, - "auth_configs":{ - "type": ["object", "null"] - }, - "definition": { - "type": ["object", "null"], - "id": "http://jsonschema.net/definition", - "properties": { - "key": { - "type": "string", - "id": "http://jsonschema.net/definition/key" - }, - "location": { - "type": "string", - "id": "http://jsonschema.net/definition/location" - }, - "strip_path": { - "type": "boolean", - "id": "http://jsonschema.net/definition/location" - } - }, - "required": [ - "key", - "location" - ] - }, - "name": { - "type": "string", - "id": "http://jsonschema.net/name" - }, - "slug": { - "type": "string", - "pattern": "[a-zA-Z0-9]*", - "id": "http://jsonschema.net/name" - }, - "domain": { - "type": "string" - }, - "listen_port": { - "type": "number" - }, - "protocol": { - "type": "string" - }, - "enable_proxy_protocol": { - "type": "boolean" - }, - "certificates": { - "type": ["array", "null"] - }, - "check_host_against_uptime_tests": { - "type": "boolean" - }, - "proxy": { - "type": ["object", "null"], - "id": "http://jsonschema.net/proxy", - "properties": { - "target_url": { - "type": "string", - "id": "http://jsonschema.net/proxy/target_url" - }, - "check_host_against_uptime_tests": { - "type": "boolean" - }, - "preserve_host_header": { - "type": "boolean" - }, - "transport": { - "type": ["object", "null"], - "properties": { - "ssl_ciphers": { - "type": ["array", "null"] - }, - "ssl_min_version": { - "type": "number" - }, - "proxy_url": { - "type": "string" - }, - "ssl_force_common_name_check": { - "type": "boolean" - } - } - } - }, - "required": [ - "target_url" - ] - }, 
- "hook_references": { - "type": ["object", "null"] - }, - "version_data": { - "type": ["object", "null"], - "id": "http://jsonschema.net/version_data", - "properties": { - "not_versioned": { - "type": "boolean", - "id": "http://jsonschema.net/version_data/not_versioned" - }, - "default_version":{ - "type": "string", - "id": "http://jsonschema.net/version_data/default_version" - }, - "versions": { - "type": ["object", "null"], - "id": "http://jsonschema.net/version_data/versions", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "title": "versionInfoProperty", - "type": ["object", "null"], - "id": "http://jsonschema.net/access_rights/versionInfoProperty", - "properties": { - "expires": { - "type": "string", - "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/expires" - }, - "name": { - "type": "string", - "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/name" - }, - "paths": { - "type": ["object", "null"], - "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths", - "properties": { - "black_list": { - "type": ["array", "null"], - "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/black_list" - }, - "ignored": { - "type": ["array", "null"], - "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/ignored" - }, - "white_list": { - "type": ["array", "null"], - "id": "http://jsonschema.net/version_data/versions/versionInfoProperty/paths/white_list" - } - } - } - }, - "required": [ - "name" - ] - } - } - } - }, - "required": [ - "not_versioned", - "versions" - ] - }, - "config_data": { - "type": ["object", "null"] - }, - "global_rate_limit": { - "type": ["object", "null"], - "properties": { - "rate": { - "type": "number" - }, - "per": { - "type": "number" - } - } - }, - "request_signing": { - "type": ["object", "null"], - "properties": { - "is_enabled": { - "type": "boolean" - }, - "secret": { - "type": "string" - }, - "key_id": { - "type": "string" - }, - "algorithm": { - "type": "string" - } - }, - "required": [ - "is_enabled" - ] - } - }, - "required": [ - "auth", - "name", - "proxy", - "version_data" - ] -}` diff --git a/vendor/github.com/TykTechnologies/tyk/config/config.go b/vendor/github.com/TykTechnologies/tyk/config/config.go deleted file mode 100644 index 5d6773f9..00000000 --- a/vendor/github.com/TykTechnologies/tyk/config/config.go +++ /dev/null @@ -1,661 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "sync/atomic" - "time" - - "github.com/kelseyhightower/envconfig" - - "github.com/TykTechnologies/tyk/apidef" - logger "github.com/TykTechnologies/tyk/log" - "github.com/TykTechnologies/tyk/regexp" -) - -type IPsHandleStrategy string - -var ( - log = logger.Get() - global atomic.Value - globalMu sync.Mutex - - Default = Config{ - ListenPort: 8080, - Secret: "352d20ee67be67f6340b4c0605b044b7", - TemplatePath: "templates", - MiddlewarePath: "middleware", - AppPath: "apps/", - Storage: StorageOptionsConf{ - Type: "redis", - Host: "localhost", - MaxIdle: 100, - Port: 6379, - }, - AnalyticsConfig: AnalyticsConfigConfig{ - IgnoredIPs: make([]string, 0), - }, - DnsCache: DnsCacheConfig{ - Enabled: false, - TTL: dnsCacheDefaultTtl, - CheckInterval: dnsCacheDefaultCheckInterval, - MultipleIPsHandleStrategy: NoCacheStrategy, - }, - } -) - -const ( - envPrefix = "TYK_GW" - - dnsCacheDefaultTtl = 3600 - dnsCacheDefaultCheckInterval = 60 - - PickFirstStrategy IPsHandleStrategy = "pick_first" - 
RandomStrategy IPsHandleStrategy = "random" - NoCacheStrategy IPsHandleStrategy = "no_cache" - - DefaultDashPolicySource = "service" - DefaultDashPolicyRecordName = "tyk_policies" -) - -type PoliciesConfig struct { - PolicySource string `json:"policy_source"` - PolicyConnectionString string `json:"policy_connection_string"` - PolicyRecordName string `json:"policy_record_name"` - AllowExplicitPolicyID bool `json:"allow_explicit_policy_id"` -} - -type DBAppConfOptionsConfig struct { - ConnectionString string `json:"connection_string"` - NodeIsSegmented bool `json:"node_is_segmented"` - Tags []string `json:"tags"` -} - -type StorageOptionsConf struct { - Type string `json:"type"` - Host string `json:"host"` - Port int `json:"port"` - Hosts map[string]string `json:"hosts"` // Deprecated: Addrs instead. - Addrs []string `json:"addrs"` - MasterName string `json:"master_name"` - Username string `json:"username"` - Password string `json:"password"` - Database int `json:"database"` - MaxIdle int `json:"optimisation_max_idle"` - MaxActive int `json:"optimisation_max_active"` - Timeout int `json:"timeout"` - EnableCluster bool `json:"enable_cluster"` - UseSSL bool `json:"use_ssl"` - SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify"` -} - -type NormalisedURLConfig struct { - Enabled bool `json:"enabled"` - NormaliseUUIDs bool `json:"normalise_uuids"` - NormaliseNumbers bool `json:"normalise_numbers"` - Custom []string `json:"custom_patterns"` - CompiledPatternSet NormaliseURLPatterns `json:"-"` // see analytics.go -} - -type NormaliseURLPatterns struct { - UUIDs *regexp.Regexp - IDs *regexp.Regexp - Custom []*regexp.Regexp -} - -type AnalyticsConfigConfig struct { - Type string `json:"type"` - IgnoredIPs []string `json:"ignored_ips"` - EnableDetailedRecording bool `json:"enable_detailed_recording"` - EnableGeoIP bool `json:"enable_geo_ip"` - GeoIPDBLocation string `json:"geo_ip_db_path"` - NormaliseUrls NormalisedURLConfig `json:"normalise_urls"` - PoolSize int `json:"pool_size"` - RecordsBufferSize uint64 `json:"records_buffer_size"` - StorageExpirationTime int `json:"storage_expiration_time"` - ignoredIPsCompiled map[string]bool -} - -type HealthCheckConfig struct { - EnableHealthChecks bool `json:"enable_health_checks"` - HealthCheckValueTimeout int64 `json:"health_check_value_timeouts"` -} - -type DnsCacheConfig struct { - Enabled bool `json:"enabled"` - TTL int64 `json:"ttl"` - CheckInterval int64 `json:"-" ignored:"true"` //controls cache cleanup interval. 
By convention shouldn't be exposed to config or env_variable_setup - MultipleIPsHandleStrategy IPsHandleStrategy `json:"multiple_ips_handle_strategy"` -} - -type MonitorConfig struct { - EnableTriggerMonitors bool `json:"enable_trigger_monitors"` - Config WebHookHandlerConf `json:"configuration"` - GlobalTriggerLimit float64 `json:"global_trigger_limit"` - MonitorUserKeys bool `json:"monitor_user_keys"` - MonitorOrgKeys bool `json:"monitor_org_keys"` -} - -type WebHookHandlerConf struct { - Method string `bson:"method" json:"method"` - TargetPath string `bson:"target_path" json:"target_path"` - TemplatePath string `bson:"template_path" json:"template_path"` - HeaderList map[string]string `bson:"header_map" json:"header_map"` - EventTimeout int64 `bson:"event_timeout" json:"event_timeout"` -} - -type SlaveOptionsConfig struct { - UseRPC bool `json:"use_rpc"` - UseSSL bool `json:"use_ssl"` - SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify"` - ConnectionString string `json:"connection_string"` - RPCKey string `json:"rpc_key"` - APIKey string `json:"api_key"` - EnableRPCCache bool `json:"enable_rpc_cache"` - BindToSlugsInsteadOfListenPaths bool `json:"bind_to_slugs"` - DisableKeySpaceSync bool `json:"disable_keyspace_sync"` - GroupID string `json:"group_id"` - CallTimeout int `json:"call_timeout"` - PingTimeout int `json:"ping_timeout"` - RPCPoolSize int `json:"rpc_pool_size"` -} - -type LocalSessionCacheConf struct { - DisableCacheSessionState bool `json:"disable_cached_session_state"` - CachedSessionTimeout int `json:"cached_session_timeout"` - CacheSessionEviction int `json:"cached_session_eviction"` -} - -type HttpServerOptionsConfig struct { - OverrideDefaults bool `json:"override_defaults"` - ReadTimeout int `json:"read_timeout"` - WriteTimeout int `json:"write_timeout"` - UseSSL bool `json:"use_ssl"` - UseLE_SSL bool `json:"use_ssl_le"` - EnableHttp2 bool `json:"enable_http2"` - SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify"` - EnableWebSockets bool `json:"enable_websockets"` - Certificates []CertData `json:"certificates"` - SSLCertificates []string `json:"ssl_certificates"` - ServerName string `json:"server_name"` - MinVersion uint16 `json:"min_version"` - FlushInterval int `json:"flush_interval"` - SkipURLCleaning bool `json:"skip_url_cleaning"` - SkipTargetPathEscaping bool `json:"skip_target_path_escaping"` - Ciphers []string `json:"ssl_ciphers"` -} - -type AuthOverrideConf struct { - ForceAuthProvider bool `json:"force_auth_provider"` - AuthProvider apidef.AuthProviderMeta `json:"auth_provider"` - ForceSessionProvider bool `json:"force_session_provider"` - SessionProvider apidef.SessionProviderMeta `json:"session_provider"` -} - -type UptimeTestsConfigDetail struct { - FailureTriggerSampleSize int `json:"failure_trigger_sample_size"` - TimeWait int `json:"time_wait"` - CheckerPoolSize int `json:"checker_pool_size"` - EnableUptimeAnalytics bool `json:"enable_uptime_analytics"` -} - -type UptimeTestsConfig struct { - Disable bool `json:"disable"` - Config UptimeTestsConfigDetail `json:"config"` -} - -type ServiceDiscoveryConf struct { - DefaultCacheTimeout int `json:"default_cache_timeout"` -} - -type CoProcessConfig struct { - EnableCoProcess bool `json:"enable_coprocess"` - CoProcessGRPCServer string `json:"coprocess_grpc_server"` - GRPCRecvMaxSize int `json:"grpc_recv_max_size"` - GRPCSendMaxSize int `json:"grpc_send_max_size"` - PythonPathPrefix string `json:"python_path_prefix"` - PythonVersion string `json:"python_version"` -} - -type CertificatesConfig 
struct { - API []string `json:"apis"` - Upstream map[string]string `json:"upstream"` - ControlAPI []string `json:"control_api"` - Dashboard []string `json:"dashboard_api"` - MDCB []string `json:"mdcb_api"` -} - -type SecurityConfig struct { - PrivateCertificateEncodingSecret string `json:"private_certificate_encoding_secret"` - ControlAPIUseMutualTLS bool `json:"control_api_use_mutual_tls"` - PinnedPublicKeys map[string]string `json:"pinned_public_keys"` - Certificates CertificatesConfig `json:"certificates"` -} - -type NewRelicConfig struct { - AppName string `json:"app_name"` - LicenseKey string `json:"license_key"` -} - -type Tracer struct { - // The name of the tracer to initialize. For instance appdash, to use appdash - // tracer - Name string `json:"name"` - - // If true then this tracer will be activated and all tracing data will be sent - // to this tracer.NoOp tracer is used otherwise which collects traces but - // discard them. - Enabled bool `json:"enabled"` - - // Key value pairs used to initialize the tracer. These are tracer specific, - // each tracer requires different options to operate. Please see trace package - // for options required by supported tracer implementation. - Options map[string]interface{} `json:"options"` -} - -// ServicePort defines a protocol and port on which a service can bind to -type ServicePort struct { - Protocol string `json:"protocol"` - Port int `json:"port"` -} - -// PortWhiteList defines ports that will be allowed by the gateway. -type PortWhiteList struct { - Ranges []PortRange `json:"ranges,omitempty"` - Ports []int `json:"ports,omitempty"` -} - -// Match returns true if port is acceptable from the PortWhiteList. -func (p PortWhiteList) Match(port int) bool { - for _, v := range p.Ports { - if port == v { - return true - } - } - for _, r := range p.Ranges { - if r.Match(port) { - return true - } - } - return false -} - -// PortRange defines a range of ports inclusively. -type PortRange struct { - From int `json:"from"` - To int `json:"to"` -} - -// Match returns true if port is within the range -func (r PortRange) Match(port int) bool { - return r.From <= port && r.To >= port -} - -// Config is the configuration object used by tyk to set up various parameters. -type Config struct { - // OriginalPath is the path to the config file that was read. If - // none was found, it's the path to the default config file that - // was written. 
- OriginalPath string `json:"-"` - - HostName string `json:"hostname"` - ListenAddress string `json:"listen_address"` - ListenPort int `json:"listen_port"` - ControlAPIHostname string `json:"control_api_hostname"` - ControlAPIPort int `json:"control_api_port"` - Secret string `json:"secret"` - NodeSecret string `json:"node_secret"` - PIDFileLocation string `json:"pid_file_location"` - AllowInsecureConfigs bool `json:"allow_insecure_configs"` - PublicKeyPath string `json:"public_key_path"` - AllowRemoteConfig bool `bson:"allow_remote_config" json:"allow_remote_config"` - Security SecurityConfig `json:"security"` - HttpServerOptions HttpServerOptionsConfig `json:"http_server_options"` - ReloadWaitTime int `bson:"reload_wait_time" json:"reload_wait_time"` - VersionHeader string `json:"version_header"` - UseAsyncSessionWrite bool `json:"optimisations_use_async_session_write"` - SuppressRedisSignalReload bool `json:"suppress_redis_signal_reload"` - - // Gateway Security Policies - HashKeys bool `json:"hash_keys"` - HashKeyFunction string `json:"hash_key_function"` - EnableHashedKeysListing bool `json:"enable_hashed_keys_listing"` - MinTokenLength int `json:"min_token_length"` - EnableAPISegregation bool `json:"enable_api_segregation"` - TemplatePath string `json:"template_path"` - Policies PoliciesConfig `json:"policies"` - DisablePortWhiteList bool `json:"disable_ports_whitelist"` - // Defines the ports that will be available for the api services to bind to. - // This is a map of protocol to PortWhiteList. This allows per protocol - // configurations. - PortWhiteList map[string]PortWhiteList `json:"ports_whitelist"` - - // CE Configurations - AppPath string `json:"app_path"` - - // Dashboard Configurations - UseDBAppConfigs bool `json:"use_db_app_configs"` - DBAppConfOptions DBAppConfOptionsConfig `json:"db_app_conf_options"` - Storage StorageOptionsConf `json:"storage"` - DisableDashboardZeroConf bool `json:"disable_dashboard_zeroconf"` - - // Slave Configurations - SlaveOptions SlaveOptionsConfig `json:"slave_options"` - ManagementNode bool `json:"management_node"` - AuthOverride AuthOverrideConf `json:"auth_override"` - - // Rate Limiting Strategy - EnableNonTransactionalRateLimiter bool `json:"enable_non_transactional_rate_limiter"` - EnableSentinelRateLimiter bool `json:"enable_sentinel_rate_limiter"` - EnableRedisRollingLimiter bool `json:"enable_redis_rolling_limiter"` - DRLNotificationFrequency int `json:"drl_notification_frequency"` - DRLThreshold float64 `json:"drl_threshold"` - - // Organization configurations - EnforceOrgDataAge bool `json:"enforce_org_data_age"` - EnforceOrgDataDetailLogging bool `json:"enforce_org_data_detail_logging"` - EnforceOrgQuotas bool `json:"enforce_org_quotas"` - ExperimentalProcessOrgOffThread bool `json:"experimental_process_org_off_thread"` - Monitor MonitorConfig `json:"monitor"` - - // Client-Gateway Configuration - MaxIdleConns int `bson:"max_idle_connections" json:"max_idle_connections"` - MaxIdleConnsPerHost int `bson:"max_idle_connections_per_host" json:"max_idle_connections_per_host"` - MaxConnTime int64 `json:"max_conn_time"` - CloseIdleConnections bool `json:"close_idle_connections"` - CloseConnections bool `json:"close_connections"` - EnableCustomDomains bool `json:"enable_custom_domains"` - // If AllowMasterKeys is set to true, session objects (key definitions) that do not have explicit access rights set - // will be allowed by Tyk. 
This means that keys that are created have access to ALL APIs, which in many cases is - // unwanted behaviour unless you are sure about what you are doing. - AllowMasterKeys bool `json:"allow_master_keys"` - - // Gateway-Service Configuration - ServiceDiscovery ServiceDiscoveryConf `json:"service_discovery"` - ProxySSLInsecureSkipVerify bool `json:"proxy_ssl_insecure_skip_verify"` - ProxyEnableHttp2 bool `json:"proxy_enable_http2"` - ProxySSLMinVersion uint16 `json:"proxy_ssl_min_version"` - ProxySSLCipherSuites []string `json:"proxy_ssl_ciphers"` - ProxyDefaultTimeout float64 `json:"proxy_default_timeout"` - ProxySSLDisableRenegotiation bool `json:"proxy_ssl_disable_renegotiation"` - ProxyCloseConnections bool `json:"proxy_close_connections"` - UptimeTests UptimeTestsConfig `json:"uptime_tests"` - HealthCheck HealthCheckConfig `json:"health_check"` - OauthRefreshExpire int64 `json:"oauth_refresh_token_expire"` - OauthTokenExpire int32 `json:"oauth_token_expire"` - OauthTokenExpiredRetainPeriod int32 `json:"oauth_token_expired_retain_period"` - OauthRedirectUriSeparator string `json:"oauth_redirect_uri_separator"` - OauthErrorStatusCode int `json:"oauth_error_status_code"` - EnableKeyLogging bool `json:"enable_key_logging"` - SSLForceCommonNameCheck bool `json:"ssl_force_common_name_check"` - - // Proxy analytics configuration - EnableAnalytics bool `json:"enable_analytics"` - AnalyticsConfig AnalyticsConfigConfig `json:"analytics_config"` - - // Cache - DnsCache DnsCacheConfig `json:"dns_cache"` - DisableRegexpCache bool `json:"disable_regexp_cache"` - RegexpCacheExpire int32 `json:"regexp_cache_expire"` - LocalSessionCache LocalSessionCacheConf `json:"local_session_cache"` - EnableSeperateCacheStore bool `json:"enable_separate_cache_store"` - CacheStorage StorageOptionsConf `json:"cache_storage"` - - // Middleware/Plugin Configuration - EnableBundleDownloader bool `bson:"enable_bundle_downloader" json:"enable_bundle_downloader"` - BundleBaseURL string `bson:"bundle_base_url" json:"bundle_base_url"` - EnableJSVM bool `json:"enable_jsvm"` - JSVMTimeout int `json:"jsvm_timeout"` - DisableVirtualPathBlobs bool `json:"disable_virtual_path_blobs"` - TykJSPath string `json:"tyk_js_path"` - MiddlewarePath string `json:"middleware_path"` - CoProcessOptions CoProcessConfig `json:"coprocess_options"` - - // Monitoring, Logging & Profiling - LogLevel string `json:"log_level"` - HealthCheckEndpointName string `json:"health_check_endpoint_name"` - Tracer Tracer `json:"tracing"` - NewRelic NewRelicConfig `json:"newrelic"` - HTTPProfile bool `json:"enable_http_profiler"` - UseRedisLog bool `json:"use_redis_log"` - SentryCode string `json:"sentry_code"` - UseSentry bool `json:"use_sentry"` - UseSyslog bool `json:"use_syslog"` - UseGraylog bool `json:"use_graylog"` - UseLogstash bool `json:"use_logstash"` - GraylogNetworkAddr string `json:"graylog_network_addr"` - LogstashNetworkAddr string `json:"logstash_network_addr"` - SyslogTransport string `json:"syslog_transport"` - LogstashTransport string `json:"logstash_transport"` - SyslogNetworkAddr string `json:"syslog_network_addr"` - StatsdConnectionString string `json:"statsd_connection_string"` - StatsdPrefix string `json:"statsd_prefix"` - - // Event System - EventHandlers apidef.EventHandlerMetaConfig `json:"event_handlers"` - EventTriggers map[apidef.TykEvent][]TykEventHandler `json:"event_trigers_defunct"` // Deprecated: Config.GetEventTriggers instead. 
- EventTriggersDefunct map[apidef.TykEvent][]TykEventHandler `json:"event_triggers_defunct"` // Deprecated: Config.GetEventTriggers instead. - - // TODO: These config options are not documented - What do they do? - SessionUpdatePoolSize int `json:"session_update_pool_size"` - SessionUpdateBufferSize int `json:"session_update_buffer_size"` - SupressDefaultOrgStore bool `json:"suppress_default_org_store"` - LegacyEnableAllowanceCountdown bool `bson:"legacy_enable_allowance_countdown" json:"legacy_enable_allowance_countdown"` - GlobalSessionLifetime int64 `bson:"global_session_lifetime" json:"global_session_lifetime"` - ForceGlobalSessionLifetime bool `bson:"force_global_session_lifetime" json:"force_global_session_lifetime"` - HideGeneratorHeader bool `json:"hide_generator_header"` - KV struct { - Consul ConsulConfig `json:"consul"` - Vault VaultConfig `json:"vault"` - } `json:"kv"` - - // Secrets are key-value pairs that can be accessed in the dashboard via "secrets://" - Secrets map[string]string `json:"secrets"` -} - -// VaultConfig is used to configure the creation of a client -// This is a stripped down version of the Config struct in vault's API client -type VaultConfig struct { - // Address is the address of the Vault server. This should be a complete - // URL such as "http://vault.example.com". - Address string `json:"address"` - - // AgentAddress is the address of the local Vault agent. This should be a - // complete URL such as "http://vault.example.com". - AgentAddress string `json:"agent_address"` - - // MaxRetries controls the maximum number of times to retry when a vault - // serer occurs - MaxRetries int `json:"max_retries"` - - Timeout time.Duration `json:"timeout"` - - // Token is the vault root token - Token string `json:"token"` - - // KVVersion is the version number of Vault. Usually defaults to 2 - KVVersion int `json:"kv_version"` -} - -// ConsulConfig is used to configure the creation of a client -// This is a stripped down version of the Config struct in consul's API client -type ConsulConfig struct { - // Address is the address of the Consul server - Address string `json:"address"` - - // Scheme is the URI scheme for the Consul server - Scheme string `json:"scheme"` - - // Datacenter to use. If not provided, the default agent datacenter is used. - Datacenter string `json:"datacenter"` - - // HttpAuth is the auth info to use for http access. - HttpAuth struct { - // Username to use for HTTP Basic Authentication - Username string `json:"username"` - - // Password to use for HTTP Basic Authentication - Password string `json:"password"` - } `json:"http_auth"` - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration `json:"wait_time"` - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string `json:"token"` - - TLSConfig struct { - Address string `json:"address"` - - CAFile string `json:"ca_file"` - - CAPath string `json:"ca_path"` - - CertFile string `json:"cert_file"` - - KeyFile string `json:"key_file"` - - InsecureSkipVerify bool `json:"insecure_skip_verify"` - } `json:"tls_config"` -} - -// GetEventTriggers returns event triggers. There was a typo in the json tag. -// To maintain backward compatibility, this solution is chosen. 
-func (c Config) GetEventTriggers() map[apidef.TykEvent][]TykEventHandler { - if c.EventTriggersDefunct == nil { - return c.EventTriggers - } - - if c.EventTriggers != nil { - log.Info("Both event_trigers_defunct and event_triggers_defunct are configured in the config," + - " event_triggers_defunct will be used.") - } - - return c.EventTriggersDefunct -} - -func (c *Config) SetEventTriggers(eventTriggers map[apidef.TykEvent][]TykEventHandler) { - c.EventTriggersDefunct = eventTriggers -} - -type CertData struct { - Name string `json:"domain_name"` - CertFile string `json:"cert_file"` - KeyFile string `json:"key_file"` -} - -// EventMessage is a standard form to send event data to handlers -type EventMessage struct { - Type apidef.TykEvent - Meta interface{} - TimeStamp string -} - -// TykEventHandler defines an event handler, e.g. LogMessageEventHandler will handle an event by logging it to stdout. -type TykEventHandler interface { - Init(interface{}) error - HandleEvent(EventMessage) -} - -func init() { - SetGlobal(Config{}) -} - -func Global() Config { - return global.Load().(Config) -} - -func SetGlobal(conf Config) { - globalMu.Lock() - defer globalMu.Unlock() - global.Store(conf) -} - -func WriteConf(path string, conf *Config) error { - bs, err := json.MarshalIndent(conf, "", " ") - if err != nil { - return err - } - return ioutil.WriteFile(path, bs, 0644) -} - -// writeDefault will set conf to the default config and write it to disk -// in path, if the path is non-empty. -func WriteDefault(path string, conf *Config) error { - _, b, _, _ := runtime.Caller(0) - configPath := filepath.Dir(b) - rootPath := filepath.Dir(configPath) - Default.TemplatePath = filepath.Join(rootPath, "templates") - - *conf = Default - if err := envconfig.Process(envPrefix, conf); err != nil { - return err - } - if path == "" { - return nil - } - return WriteConf(path, conf) -} - -// Load will load a configuration file, trying each of the paths given -// and using the first one that is a regular file and can be opened. -// -// If none exists, a default config will be written to the first path in -// the list. -// -// An error will be returned only if any of the paths existed but was -// not a valid config file. 
-func Load(paths []string, conf *Config) error { - var r io.Reader - for _, path := range paths { - f, err := os.Open(path) - if err == nil { - r = f - conf.OriginalPath = path - break - } - if os.IsNotExist(err) { - continue - } - return err - } - if r == nil { - path := paths[0] - log.Warnf("No config file found, writing default to %s", path) - if err := WriteDefault(path, conf); err != nil { - return err - } - log.Info("Loading default configuration...") - return Load([]string{path}, conf) - } - if err := json.NewDecoder(r).Decode(&conf); err != nil { - return fmt.Errorf("couldn't unmarshal config: %v", err) - } - if err := envconfig.Process(envPrefix, conf); err != nil { - return fmt.Errorf("failed to process config env vars: %v", err) - } - return nil -} - -func (c *Config) LoadIgnoredIPs() { - c.AnalyticsConfig.ignoredIPsCompiled = make(map[string]bool, len(c.AnalyticsConfig.IgnoredIPs)) - for _, ip := range c.AnalyticsConfig.IgnoredIPs { - c.AnalyticsConfig.ignoredIPsCompiled[ip] = true - } -} - -func (c *Config) StoreAnalytics(ip string) bool { - if !c.EnableAnalytics { - return false - } - - return !c.AnalyticsConfig.ignoredIPsCompiled[ip] -} diff --git a/vendor/github.com/TykTechnologies/tyk/log/log.go b/vendor/github.com/TykTechnologies/tyk/log/log.go deleted file mode 100644 index 80a4d42f..00000000 --- a/vendor/github.com/TykTechnologies/tyk/log/log.go +++ /dev/null @@ -1,75 +0,0 @@ -package log - -import ( - "os" - "strings" - - "github.com/hashicorp/terraform/flatmap" - "github.com/sirupsen/logrus" - prefixed "github.com/x-cray/logrus-prefixed-formatter" -) - -var ( - log = logrus.New() - rawLog = logrus.New() - translations = make(map[string]string) -) - -// LoadTranslations takes a map[string]interface and flattens it to map[string]string -// Because translations have been loaded - we internally override log the formatter -// Nested entries are accessible using dot notation. 
-// example: `{"foo": {"bar": "baz"}}` -// flattened: `foo.bar: baz` -func LoadTranslations(thing map[string]interface{}) { - formatter := new(prefixed.TextFormatter) - formatter.TimestampFormat = `Jan 02 15:04:05` - formatter.FullTimestamp = true - log.Formatter = &TranslationFormatter{formatter} - translations = flatmap.Flatten(thing) -} - -type TranslationFormatter struct { - *prefixed.TextFormatter -} - -func (t *TranslationFormatter) Format(entry *logrus.Entry) ([]byte, error) { - if code, ok := entry.Data["code"]; ok { - if translation, ok := translations[code.(string)]; ok { - entry.Message = translation - } - } - return t.TextFormatter.Format(entry) -} - -type RawFormatter struct{} - -func (f *RawFormatter) Format(entry *logrus.Entry) ([]byte, error) { - return []byte(entry.Message), nil -} - -func init() { - formatter := new(prefixed.TextFormatter) - formatter.TimestampFormat = `Jan 02 15:04:05` - formatter.FullTimestamp = true - - log.Formatter = formatter - rawLog.Formatter = new(RawFormatter) -} - -func Get() *logrus.Logger { - switch strings.ToLower(os.Getenv("TYK_LOGLEVEL")) { - case "error": - log.Level = logrus.ErrorLevel - case "warn": - log.Level = logrus.WarnLevel - case "debug": - log.Level = logrus.DebugLevel - default: - log.Level = logrus.InfoLevel - } - return log -} - -func GetRaw() *logrus.Logger { - return rawLog -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache.go deleted file mode 100644 index d8a6b450..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache.go +++ /dev/null @@ -1,88 +0,0 @@ -package regexp - -import ( - "regexp" - "time" - - gocache "github.com/pmylund/go-cache" -) - -const ( - defaultCacheItemTTL = 60 * time.Second - defaultCacheCleanupInterval = 5 * time.Minute - - maxKeySize = 1024 - maxValueSize = 2048 -) - -type cache struct { - *gocache.Cache - isEnabled bool - ttl time.Duration -} - -func newCache(ttl time.Duration, isEnabled bool) *cache { - return &cache{ - Cache: gocache.New(ttl, defaultCacheCleanupInterval), - isEnabled: isEnabled, - ttl: ttl, - } -} - -func (c *cache) enabled() bool { - return c.isEnabled && c.Cache != nil -} - -func (c *cache) add(key string, value interface{}) { - c.Add(key, value, c.ttl) -} - -func (c *cache) getRegexp(key string) (*regexp.Regexp, bool) { - if val, found := c.Get(key); found { - return val.(*regexp.Regexp).Copy(), true - } - - return nil, false -} - -func (c *cache) getString(key string) (string, bool) { - if val, found := c.Get(key); found { - return val.(string), true - } - - return "", false -} - -func (c *cache) getStrSlice(key string) ([]string, bool) { - if val, found := c.Get(key); found { - return val.([]string), true - } - - return []string{}, false -} - -func (c *cache) getStrSliceOfSlices(key string) ([][]string, bool) { - if val, found := c.Get(key); found { - return val.([][]string), true - } - - return [][]string{}, false -} - -func (c *cache) getBool(key string) (bool, bool) { - if val, found := c.Get(key); found { - return val.(bool), true - } - - return false, false -} - -func (c *cache) reset(ttl time.Duration, isEnabled bool) { - if c.Cache == nil { - return - } - - c.isEnabled = isEnabled - c.ttl = ttl - c.Flush() -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp.go deleted file mode 100644 index dc1f5302..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp.go +++ /dev/null @@ -1,59 +0,0 
@@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpCache struct { - *cache - noCacheFunc func(string) (*regexp.Regexp, error) -} - -func newRegexpCache(ttl time.Duration, isEnabled bool, fn func(string) (*regexp.Regexp, error)) *regexpCache { - return ®expCache{ - cache: newCache( - ttl, - isEnabled, - ), - noCacheFunc: fn, - } -} - -func (c *regexpCache) doNoCacheFunc(str string) (*Regexp, error) { - rx, err := c.noCacheFunc(str) - if err != nil { - return nil, err - } - - return &Regexp{ - rx, - false, - }, - nil -} - -func (c *regexpCache) do(str string) (*Regexp, error) { - // return if cache is not enabled - if !c.enabled() { - return c.doNoCacheFunc(str) - } - - // cache hit - if rx, found := c.getRegexp(str); found { - return &Regexp{ - rx, - true, - }, - nil - } - - // cache miss, add to cache - regExp, err := c.doNoCacheFunc(str) - if err != nil { - return nil, err - } - c.add(str, regExp.Regexp) - - return regExp, nil -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_byte_ret_bool.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_byte_ret_bool.go deleted file mode 100644 index 52004de7..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_byte_ret_bool.go +++ /dev/null @@ -1,47 +0,0 @@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpByteRetBoolCache struct { - *cache -} - -func newRegexpByteRetBoolCache(ttl time.Duration, isEnabled bool) *regexpByteRetBoolCache { - return ®expByteRetBoolCache{ - cache: newCache( - ttl, - isEnabled, - ), - } -} - -func (c *regexpByteRetBoolCache) do(r *regexp.Regexp, b []byte, noCacheFn func([]byte) bool) bool { - // return if cache is not enabled - if !c.enabled() { - return noCacheFn(b) - } - - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendBytes(b).UnsafeKey() - if len(nsKey) > maxKeySize { - return noCacheFn(b) - } - - // cache hit - if res, found := c.getBool(nsKey); found { - return res - } - - // cache miss, add to cache - res := noCacheFn(b) - c.add(kb.Key(), res) - - return res -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_func_ret_str.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_func_ret_str.go deleted file mode 100644 index fb4519c6..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_func_ret_str.go +++ /dev/null @@ -1,51 +0,0 @@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpStrFuncRetStrCache struct { - *cache -} - -func newRegexpStrFuncRetStrCache(ttl time.Duration, isEnabled bool) *regexpStrFuncRetStrCache { - return ®expStrFuncRetStrCache{ - cache: newCache( - ttl, - isEnabled, - ), - } -} - -func (c *regexpStrFuncRetStrCache) do(r *regexp.Regexp, src string, repl func(string) string, noCacheFn func(string, func(string) string) string) string { - // return if cache is not enabled - if !c.enabled() { - return noCacheFn(src, repl) - } - - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(src).Appendf("%p", repl).UnsafeKey() - if len(nsKey) > maxKeySize { - return noCacheFn(src, repl) - } - - // cache hit - if res, found := c.getString(nsKey); found { - return res - } - - // cache miss, add to cache if value is not too big - res := noCacheFn(src, repl) - if len(res) > maxValueSize { - return res - } - - 
c.add(kb.Key(), res) - - return res -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_int_ret_slice_slice_str.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_int_ret_slice_slice_str.go deleted file mode 100644 index b8dfb552..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_int_ret_slice_slice_str.go +++ /dev/null @@ -1,51 +0,0 @@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpStrIntRetSliceSliceStrCache struct { - *cache -} - -func newRegexpStrIntRetSliceSliceStrCache(ttl time.Duration, isEnabled bool) *regexpStrIntRetSliceSliceStrCache { - return ®expStrIntRetSliceSliceStrCache{ - cache: newCache( - ttl, - isEnabled, - ), - } -} - -func (c *regexpStrIntRetSliceSliceStrCache) do(r *regexp.Regexp, s string, n int, noCacheFn func(s string, n int) [][]string) [][]string { - // return if cache is not enabled - if !c.enabled() { - return noCacheFn(s, n) - } - - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).AppendInt(n).UnsafeKey() - if len(nsKey) > maxKeySize { - return noCacheFn(s, n) - } - - // cache hit - if res, found := c.getStrSliceOfSlices(nsKey); found { - return res - } - - // cache miss, add to cache if value is not too big - res := noCacheFn(s, n) - if len(res) > maxValueSize { - return res - } - - c.add(kb.Key(), res) - - return res -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_int_ret_slice_str.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_int_ret_slice_str.go deleted file mode 100644 index ad3c0723..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_int_ret_slice_str.go +++ /dev/null @@ -1,51 +0,0 @@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpStrIntRetSliceStrCache struct { - *cache -} - -func newRegexpStrIntRetSliceStrCache(ttl time.Duration, isEnabled bool) *regexpStrIntRetSliceStrCache { - return ®expStrIntRetSliceStrCache{ - cache: newCache( - ttl, - isEnabled, - ), - } -} - -func (c *regexpStrIntRetSliceStrCache) do(r *regexp.Regexp, s string, n int, noCacheFn func(s string, n int) []string) []string { - // return if cache is not enabled - if !c.enabled() { - return noCacheFn(s, n) - } - - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).AppendInt(n).UnsafeKey() - if len(nsKey) > maxKeySize { - return noCacheFn(s, n) - } - - // cache hit - if res, found := c.getStrSlice(nsKey); found { - return res - } - - // cache miss, add to cache if value is not too big - res := noCacheFn(s, n) - if len(res) > maxValueSize { - return res - } - - c.add(kb.Key(), res) - - return res -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_ret_bool.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_ret_bool.go deleted file mode 100644 index 6544b279..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_ret_bool.go +++ /dev/null @@ -1,47 +0,0 @@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpStrRetBoolCache struct { - *cache -} - -func newRegexpStrRetBoolCache(ttl time.Duration, isEnabled bool) *regexpStrRetBoolCache { - return ®expStrRetBoolCache{ - cache: newCache( - ttl, - isEnabled, - ), - } -} - -func (c *regexpStrRetBoolCache) do(r *regexp.Regexp, s string, 
noCacheFn func(string) bool) bool { - // return if cache is not enabled - if !c.enabled() { - return noCacheFn(s) - } - - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).UnsafeKey() - if len(nsKey) > maxKeySize { - return noCacheFn(s) - } - - // cache hit - if res, found := c.getBool(nsKey); found { - return res - } - - // cache miss, add to cache - res := noCacheFn(s) - c.add(kb.Key(), res) - - return res -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_ret_slice_str.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_ret_slice_str.go deleted file mode 100644 index ec1e7884..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_ret_slice_str.go +++ /dev/null @@ -1,49 +0,0 @@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpStrRetSliceStrCache struct { - *cache -} - -func newRegexpStrRetSliceStrCache(ttl time.Duration, isEnabled bool) *regexpStrRetSliceStrCache { - return ®expStrRetSliceStrCache{ - cache: newCache( - ttl, - isEnabled, - ), - } -} - -func (c *regexpStrRetSliceStrCache) do(r *regexp.Regexp, s string, noCacheFn func(s string) []string) []string { - // return if cache is not enabled - if !c.enabled() { - return noCacheFn(s) - } - - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).UnsafeKey() - if len(nsKey) > maxKeySize { - return noCacheFn(s) - } - - // cache hit - if res, found := c.getStrSlice(nsKey); found { - return res - } - - // cache miss, add to cache if value is not too big - res := noCacheFn(s) - if len(res) <= maxValueSize { - c.add(kb.Key(), res) - } - - return res -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_str_ret_str.go b/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_str_ret_str.go deleted file mode 100644 index 788136d4..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/cache_regexp_str_str_ret_str.go +++ /dev/null @@ -1,51 +0,0 @@ -package regexp - -import ( - "regexp" - "time" -) - -type regexpStrStrRetStrCache struct { - *cache -} - -func newRegexpStrStrRetStrCache(ttl time.Duration, isEnabled bool) *regexpStrStrRetStrCache { - return ®expStrStrRetStrCache{ - cache: newCache( - ttl, - isEnabled, - ), - } -} - -func (c *regexpStrStrRetStrCache) do(r *regexp.Regexp, src string, repl string, noCacheFn func(string, string) string) string { - // return if cache is not enabled - if !c.enabled() { - return noCacheFn(src, repl) - } - - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(src).AppendString(repl).UnsafeKey() - if len(nsKey) > maxKeySize { - return noCacheFn(src, repl) - } - - // cache hit - if res, found := c.getString(nsKey); found { - return res - } - - // cache miss, add to cache if value is not too big - res := noCacheFn(src, repl) - if len(res) > maxValueSize { - return res - } - - c.add(kb.Key(), res) - - return res -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/keybuilder.go b/vendor/github.com/TykTechnologies/tyk/regexp/keybuilder.go deleted file mode 100644 index bc528aff..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/keybuilder.go +++ /dev/null @@ -1,66 +0,0 @@ -package regexp - -import ( - "fmt" - 
"strconv" - "sync" - "unsafe" -) - -var keyBuilderPool = sync.Pool{ - New: func() interface{} { return new(keyBuilder) }, -} - -// Combine logic of strings.Builder and bytes.Buffer. -// Allow to reuse builder with 0 allocs, as bytes.Buffer -// and also allow to get 0 alloc string representation -// as strings.Builder -type keyBuilder struct { - buf []byte -} - -// Reset resets the keyBuilder to be empty. -func (kb *keyBuilder) Reset() *keyBuilder { - kb.buf = kb.buf[:0] - return kb -} - -// Returns content of internal buffer, converted to string. -// Safe for using as key for storing item, immutable -func (kb *keyBuilder) Key() string { - return string(kb.buf) -} - -// Returns string representation of internal buffer. -// Mutable, sequential writes to keyBuilder will -// also mutate returned representation. -// Safe for lookups by key. -// Should not be used as key for storing items. -func (kb *keyBuilder) UnsafeKey() string { - return *(*string)(unsafe.Pointer(&kb.buf)) -} - -func (kb *keyBuilder) Write(p []byte) (int, error) { - kb.buf = append(kb.buf, p...) - return len(p), nil -} - -func (kb *keyBuilder) AppendString(s string) *keyBuilder { - kb.buf = append(kb.buf, s...) - return kb -} - -func (kb *keyBuilder) AppendBytes(b []byte) *keyBuilder { - kb.buf = append(kb.buf, b...) - return kb -} - -func (kb *keyBuilder) Appendf(format string, a ...interface{}) *keyBuilder { - fmt.Fprintf(kb, format, a...) - return kb -} - -func (kb *keyBuilder) AppendInt(n int) *keyBuilder { - kb.buf = strconv.AppendInt(kb.buf, int64(n), 10) - return kb -} diff --git a/vendor/github.com/TykTechnologies/tyk/regexp/regexp.go b/vendor/github.com/TykTechnologies/tyk/regexp/regexp.go deleted file mode 100644 index bf3d0c1e..00000000 --- a/vendor/github.com/TykTechnologies/tyk/regexp/regexp.go +++ /dev/null @@ -1,422 +0,0 @@ -/* -Package regexp implmenets API of Go's native "regexp" but with caching results in memory -*/ -package regexp - -import ( - "io" - "regexp" - "strconv" - "time" -) - -var ( - compileCache = newRegexpCache(defaultCacheItemTTL, true, regexp.Compile) - compilePOSIXCache = newRegexpCache(defaultCacheItemTTL, true, regexp.CompilePOSIX) - matchStringCache = newRegexpStrRetBoolCache(defaultCacheItemTTL, true) - matchCache = newRegexpByteRetBoolCache(defaultCacheItemTTL, true) - replaceAllStringCache = newRegexpStrStrRetStrCache(defaultCacheItemTTL, true) - replaceAllLiteralStringCache = newRegexpStrStrRetStrCache(defaultCacheItemTTL, true) - replaceAllStringFuncCache = newRegexpStrFuncRetStrCache(defaultCacheItemTTL, true) - findStringSubmatchCache = newRegexpStrRetSliceStrCache(defaultCacheItemTTL, true) - findAllStringCache = newRegexpStrIntRetSliceStrCache(defaultCacheItemTTL, true) - findAllStringSubmatchCache = newRegexpStrIntRetSliceSliceStrCache(defaultCacheItemTTL, true) -) - -// Regexp is a wrapper around regexp.Regexp but with caching -type Regexp struct { - *regexp.Regexp - FromCache bool -} - -// ResetCache resets cache to initial state -func ResetCache(ttl time.Duration, isEnabled bool) { - if ttl == 0 { - ttl = defaultCacheItemTTL - } - - compileCache.reset(ttl, isEnabled) - compilePOSIXCache.reset(ttl, isEnabled) - matchStringCache.reset(ttl, isEnabled) - matchCache.reset(ttl, isEnabled) - replaceAllStringCache.reset(ttl, isEnabled) - replaceAllLiteralStringCache.reset(ttl, isEnabled) - replaceAllStringFuncCache.reset(ttl, isEnabled) - findStringSubmatchCache.reset(ttl, isEnabled) - findAllStringCache.reset(ttl, isEnabled) - findAllStringSubmatchCache.reset(ttl, isEnabled) -} - 
-// Compile does the same as regexp.Compile but returns cached *Regexp instead. -func Compile(expr string) (*Regexp, error) { - return compileCache.do(expr) -} - -// CompilePOSIX does the same as regexp.CompilePOSIX but returns cached *Regexp instead. -func CompilePOSIX(expr string) (*Regexp, error) { - return compilePOSIXCache.do(expr) -} - -// MustCompile is the same as regexp.MustCompile but returns cached *Regexp instead. -func MustCompile(str string) *Regexp { - regexp, err := Compile(str) - if err != nil { - panic(`regexp: Compile(` + quote(str) + `): ` + err.Error()) - } - return regexp -} - -// MustCompilePOSIX is the same as regexp.MustCompilePOSIX but returns cached *Regexp instead. -func MustCompilePOSIX(str string) *Regexp { - regexp, err := CompilePOSIX(str) - if err != nil { - panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error()) - } - return regexp -} - -// MatchString is the same as regexp.MatchString but returns cached result instead. -func MatchString(pattern string, s string) (matched bool, err error) { - re, err := Compile(pattern) - if err != nil { - return false, err - } - return re.MatchString(s), nil -} - -// Match is the same as regexp.Match but returns cached result instead. -func Match(pattern string, b []byte) (matched bool, err error) { - re, err := Compile(pattern) - if err != nil { - return false, err - } - return re.Match(b), nil -} - -// QuoteMeta is the same as regexp.QuoteMeta but returns cached result instead. -func QuoteMeta(s string) string { - // TODO: add cache for QuoteMeta - return regexp.QuoteMeta(s) -} - -func quote(s string) string { - if strconv.CanBackquote(s) { - return "`" + s + "`" - } - return strconv.Quote(s) -} - -// String returns the source text used to compile the wrapped regular expression. -func (re *Regexp) String() string { - if re.Regexp == nil { - return "" - } - return re.Regexp.String() -} - -// Copy returns a new Regexp object copied from re. -// -// When using a Regexp in multiple goroutines, giving each goroutine -// its own copy helps to avoid lock contention. -func (re *Regexp) Copy() *Regexp { - reCopy := &Regexp{ - FromCache: re.FromCache, - } - if re.Regexp != nil { - reCopy.Regexp = re.Regexp.Copy() - } - return reCopy -} - -// Longest calls regexp.Regexp.Longest of wrapped regular expression -func (re *Regexp) Longest() { - if re.Regexp == nil { - re.Regexp.Longest() - } -} - -// NumSubexp returns result of regexp.Regexp.NumSubexp of wrapped regular expression. -func (re *Regexp) NumSubexp() int { - if re.Regexp == nil { - return 0 - } - return re.Regexp.NumSubexp() -} - -// SubexpNames returns result of regexp.Regexp.SubexpNames of wrapped regular expression. -func (re *Regexp) SubexpNames() []string { - if re.Regexp == nil { - return []string{} - } - return re.Regexp.SubexpNames() -} - -// LiteralPrefix returns a literal string that must begin any match -// Calls regexp.Regexp.LiteralPrefix -func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { - if re.Regexp == nil { - return "", false - } - return re.Regexp.LiteralPrefix() -} - -// MatchReader reports whether the Regexp matches the text read by the -// RuneReader. -// Calls regexp.Regexp.MatchReader (NO CACHE) -func (re *Regexp) MatchReader(r io.RuneReader) bool { - if re.Regexp == nil { - return false - } - return re.Regexp.MatchReader(r) -} - -// MatchString reports whether the Regexp matches the string s. 
-func (re *Regexp) MatchString(s string) bool { - if re.Regexp == nil { - return false - } - return matchStringCache.do(re.Regexp, s, re.Regexp.MatchString) -} - -// Match reports whether the Regexp matches the byte slice b. -func (re *Regexp) Match(b []byte) bool { - if re.Regexp == nil { - return false - } - return matchCache.do(re.Regexp, b, re.Regexp.Match) -} - -// ReplaceAllString is the same as regexp.Regexp.ReplaceAllString but returns cached result instead. -func (re *Regexp) ReplaceAllString(src, repl string) string { - if re.Regexp == nil { - return "" - } - return replaceAllStringCache.do(re.Regexp, src, repl, re.Regexp.ReplaceAllString) -} - -// ReplaceAllLiteralString is the same as regexp.Regexp.ReplaceAllLiteralString but returns cached result instead. -func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { - if re.Regexp == nil { - return "" - } - return replaceAllLiteralStringCache.do(re.Regexp, src, repl, re.Regexp.ReplaceAllLiteralString) -} - -// ReplaceAllStringFunc is the same as regexp.Regexp.ReplaceAllStringFunc but returns cached result instead. -func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { - if re.Regexp == nil { - return "" - } - return replaceAllStringFuncCache.do(re.Regexp, src, repl, re.Regexp.ReplaceAllStringFunc) -} - -// ReplaceAll is the same as regexp.Regexp.ReplaceAll but returns cached result instead. -func (re *Regexp) ReplaceAll(src, repl []byte) []byte { - if re.Regexp == nil { - return []byte{} - } - // TODO: add cache for ReplaceAll - return re.Regexp.ReplaceAll(src, repl) -} - -// ReplaceAllLiteral is the same as regexp.Regexp.ReplaceAllLiteral but returns cached result instead. -func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { - if re.Regexp == nil { - return []byte{} - } - // TODO: add cache for ReplaceAllLiteral - return re.Regexp.ReplaceAllLiteral(src, repl) -} - -// ReplaceAllFunc is the same as regexp.Regexp.ReplaceAllFunc but returns cached result instead. -func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { - if re.Regexp == nil { - return []byte{} - } - // TODO: add cache for ReplaceAllFunc - return re.Regexp.ReplaceAllFunc(src, repl) -} - -// Find is the same as regexp.Regexp.Find but returns cached result instead. -func (re *Regexp) Find(b []byte) []byte { - if re.Regexp == nil { - return []byte{} - } - // TODO: add cache for Find - return re.Regexp.Find(b) -} - -// FindIndex is the same as regexp.Regexp.FindIndex but returns cached result instead. -func (re *Regexp) FindIndex(b []byte) (loc []int) { - if re.Regexp == nil { - return - } - // TODO: add cache for FindIndex - return re.Regexp.FindIndex(b) -} - -// FindString is the same as regexp.Regexp.FindString but returns cached result instead. -func (re *Regexp) FindString(s string) string { - if re.Regexp == nil { - return "" - } - // TODO: add cache for FindString - return re.Regexp.FindString(s) -} - -// FindStringIndex is the same as regexp.Regexp.FindStringIndex but returns cached result instead. -func (re *Regexp) FindStringIndex(s string) (loc []int) { - if re.Regexp == nil { - return - } - // TODO: add cache for FindStringIndex - return re.Regexp.FindStringIndex(s) -} - -// FindReaderIndex is the same as regexp.Regexp.FindReaderIndex (NO CACHE). -func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { - if re.Regexp == nil { - return - } - return re.Regexp.FindReaderIndex(r) -} - -// FindSubmatch is the same as regexp.Regexp.FindSubmatch but returns cached result instead. 
-func (re *Regexp) FindSubmatch(b []byte) [][]byte { - if re.Regexp == nil { - return [][]byte{} - } - // TODO: add cache for FindSubmatch - return re.Regexp.FindSubmatch(b) -} - -// Expand is the same as regexp.Regexp.Expand but returns cached result instead. -func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte { - if re.Regexp == nil { - return []byte{} - } - // TODO: add cache for Expand - return re.Regexp.Expand(dst, template, src, match) -} - -// ExpandString is the same as regexp.Regexp.ExpandString but returns cached result instead. -func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { - if re.Regexp == nil { - return []byte{} - } - // TODO: add cache for ExpandString - return re.Regexp.ExpandString(dst, template, src, match) -} - -// FindSubmatchIndex is the same as regexp.Regexp.FindSubmatchIndex but returns cached result instead. -func (re *Regexp) FindSubmatchIndex(b []byte) []int { - if re.Regexp == nil { - return []int{} - } - // TODO: add cache for ExpandString - return re.Regexp.FindSubmatchIndex(b) -} - -// FindStringSubmatch is the same as regexp.Regexp.FindStringSubmatch but returns cached result instead. -func (re *Regexp) FindStringSubmatch(s string) []string { - if re.Regexp == nil { - return []string{} - } - return findStringSubmatchCache.do(re.Regexp, s, re.Regexp.FindStringSubmatch) -} - -// FindStringSubmatchIndex is the same as regexp.Regexp.FindStringSubmatchIndex but returns cached result instead. -func (re *Regexp) FindStringSubmatchIndex(s string) []int { - if re.Regexp == nil { - return []int{} - } - // TODO: add cache for FindStringSubmatchIndex - return re.Regexp.FindStringSubmatchIndex(s) -} - -// FindReaderSubmatchIndex is the same as regexp.Regexp.FindReaderSubmatchIndex (NO CACHE). -func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { - if re.Regexp == nil { - return []int{} - } - return re.Regexp.FindReaderSubmatchIndex(r) -} - -// FindAll is the same as regexp.Regexp.FindAll but returns cached result instead. -func (re *Regexp) FindAll(b []byte, n int) [][]byte { - if re.Regexp == nil { - return [][]byte{} - } - // TODO: add cache for FindAll - return re.Regexp.FindAll(b, n) -} - -// FindAllIndex is the same as regexp.Regexp.FindAllIndex but returns cached result instead. -func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { - if re.Regexp == nil { - return [][]int{} - } - // TODO: add cache for FindAllIndex - return re.Regexp.FindAllIndex(b, n) -} - -// FindAllString is the same as regexp.Regexp.FindAllString but returns cached result instead. -func (re *Regexp) FindAllString(s string, n int) []string { - if re.Regexp == nil { - return []string{} - } - return findAllStringCache.do(re.Regexp, s, n, re.Regexp.FindAllString) -} - -// FindAllStringIndex is the same as regexp.Regexp.FindAllStringIndex but returns cached result instead. -func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { - if re.Regexp == nil { - return [][]int{} - } - // TODO: add cache for FindAllStringIndex - return re.Regexp.FindAllStringIndex(s, n) -} - -// FindAllSubmatch is the same as regexp.Regexp.FindAllSubmatch but returns cached result instead. -func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { - if re.Regexp == nil { - return [][][]byte{} - } - // TODO: add cache for FindAllSubmatch - return re.Regexp.FindAllSubmatch(b, n) -} - -// FindAllSubmatchIndex is the same as regexp.Regexp.FindAllSubmatchIndex but returns cached result instead. 
-func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { - if re.Regexp == nil { - return [][]int{} - } - // TODO: add cache for FindAllSubmatchIndex - return re.Regexp.FindAllSubmatchIndex(b, n) -} - -// FindAllStringSubmatch is the same as regexp.Regexp.FindAllStringSubmatch but returns cached result instead. -func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { - if re.Regexp == nil { - return [][]string{} - } - return findAllStringSubmatchCache.do(re.Regexp, s, n, re.Regexp.FindAllStringSubmatch) -} - -// FindAllStringSubmatchIndex is the same as regexp.Regexp.FindAllStringSubmatchIndex but returns cached result instead. -func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { - if re.Regexp == nil { - return [][]int{} - } - // TODO: add cache for FindAllStringSubmatchIndex - return re.Regexp.FindAllStringSubmatchIndex(s, n) -} - -// Split is the same as regexp.Regexp.Split but returns cached result instead. -func (re *Regexp) Split(s string, n int) []string { - if re.Regexp == nil { - return []string{} - } - // TODO: add cache for Split - return re.Regexp.Split(s, n) -} diff --git a/vendor/github.com/TykTechnologies/tyk/user/policy.go b/vendor/github.com/TykTechnologies/tyk/user/policy.go deleted file mode 100644 index a4effce7..00000000 --- a/vendor/github.com/TykTechnologies/tyk/user/policy.go +++ /dev/null @@ -1,33 +0,0 @@ -package user - -import "gopkg.in/mgo.v2/bson" - -type Policy struct { - MID bson.ObjectId `bson:"_id,omitempty" json:"_id"` - ID string `bson:"id,omitempty" json:"id"` - Name string `bson:"name" json:"name"` - OrgID string `bson:"org_id" json:"org_id"` - Rate float64 `bson:"rate" json:"rate"` - Per float64 `bson:"per" json:"per"` - QuotaMax int64 `bson:"quota_max" json:"quota_max"` - QuotaRenewalRate int64 `bson:"quota_renewal_rate" json:"quota_renewal_rate"` - ThrottleInterval float64 `bson:"throttle_interval" json:"throttle_interval"` - ThrottleRetryLimit int `bson:"throttle_retry_limit" json:"throttle_retry_limit"` - AccessRights map[string]AccessDefinition `bson:"access_rights" json:"access_rights"` - HMACEnabled bool `bson:"hmac_enabled" json:"hmac_enabled"` - EnableHTTPSignatureValidation bool `json:"enable_http_signature_validation" msg:"enable_http_signature_validation"` - Active bool `bson:"active" json:"active"` - IsInactive bool `bson:"is_inactive" json:"is_inactive"` - Tags []string `bson:"tags" json:"tags"` - KeyExpiresIn int64 `bson:"key_expires_in" json:"key_expires_in"` - Partitions PolicyPartitions `bson:"partitions" json:"partitions"` - LastUpdated string `bson:"last_updated" json:"last_updated"` - MetaData map[string]interface{} `bson:"meta_data" json:"meta_data"` -} - -type PolicyPartitions struct { - Quota bool `bson:"quota" json:"quota"` - RateLimit bool `bson:"rate_limit" json:"rate_limit"` - Acl bool `bson:"acl" json:"acl"` - PerAPI bool `bson:"per_api" json:"per_api"` -} diff --git a/vendor/github.com/TykTechnologies/tyk/user/session.go b/vendor/github.com/TykTechnologies/tyk/user/session.go deleted file mode 100644 index e9ad503b..00000000 --- a/vendor/github.com/TykTechnologies/tyk/user/session.go +++ /dev/null @@ -1,184 +0,0 @@ -package user - -import ( - "crypto/md5" - "fmt" - "time" - - "github.com/TykTechnologies/tyk/config" - logger "github.com/TykTechnologies/tyk/log" -) - -var log = logger.Get() - -type HashType string - -const ( - HashPlainText HashType = "" - HashBCrypt HashType = "bcrypt" -) - -// AccessSpecs define what URLS a user has access to an what methods are enabled -type 
AccessSpec struct { - URL string `json:"url" msg:"url"` - Methods []string `json:"methods" msg:"methods"` -} - -// APILimit stores quota and rate limit on ACL level (per API) -type APILimit struct { - Rate float64 `json:"rate" msg:"rate"` - Per float64 `json:"per" msg:"per"` - ThrottleInterval float64 `json:"throttle_interval" msg:"throttle_interval"` - ThrottleRetryLimit int `json:"throttle_retry_limit" msg:"throttle_retry_limit"` - QuotaMax int64 `json:"quota_max" msg:"quota_max"` - QuotaRenews int64 `json:"quota_renews" msg:"quota_renews"` - QuotaRemaining int64 `json:"quota_remaining" msg:"quota_remaining"` - QuotaRenewalRate int64 `json:"quota_renewal_rate" msg:"quota_renewal_rate"` - SetBy string `json:"-" msg:"-"` -} - -// AccessDefinition defines which versions of an API a key has access to -type AccessDefinition struct { - APIName string `json:"api_name" msg:"api_name"` - APIID string `json:"api_id" msg:"api_id"` - Versions []string `json:"versions" msg:"versions"` - AllowedURLs []AccessSpec `bson:"allowed_urls" json:"allowed_urls" msg:"allowed_urls"` // mapped string MUST be a valid regex - Limit *APILimit `json:"limit" msg:"limit"` - - AllowanceScope string `json:"allowance_scope" msg:"allowance_scope"` -} - -// SessionState objects represent a current API session, mainly used for rate limiting. -// There's a data structure that's based on this and it's used for Protocol Buffer support, make sure to update "coprocess/proto/coprocess_session_state.proto" and generate the bindings using: cd coprocess/proto && ./update_bindings.sh -// -// swagger:model -type SessionState struct { - LastCheck int64 `json:"last_check" msg:"last_check"` - Allowance float64 `json:"allowance" msg:"allowance"` - Rate float64 `json:"rate" msg:"rate"` - Per float64 `json:"per" msg:"per"` - ThrottleInterval float64 `json:"throttle_interval" msg:"throttle_interval"` - ThrottleRetryLimit int `json:"throttle_retry_limit" msg:"throttle_retry_limit"` - DateCreated time.Time `json:"date_created" msg:"date_created"` - Expires int64 `json:"expires" msg:"expires"` - QuotaMax int64 `json:"quota_max" msg:"quota_max"` - QuotaRenews int64 `json:"quota_renews" msg:"quota_renews"` - QuotaRemaining int64 `json:"quota_remaining" msg:"quota_remaining"` - QuotaRenewalRate int64 `json:"quota_renewal_rate" msg:"quota_renewal_rate"` - AccessRights map[string]AccessDefinition `json:"access_rights" msg:"access_rights"` - OrgID string `json:"org_id" msg:"org_id"` - OauthClientID string `json:"oauth_client_id" msg:"oauth_client_id"` - OauthKeys map[string]string `json:"oauth_keys" msg:"oauth_keys"` - Certificate string `json:"certificate" msg:"certificate"` - BasicAuthData struct { - Password string `json:"password" msg:"password"` - Hash HashType `json:"hash_type" msg:"hash_type"` - } `json:"basic_auth_data" msg:"basic_auth_data"` - JWTData struct { - Secret string `json:"secret" msg:"secret"` - } `json:"jwt_data" msg:"jwt_data"` - HMACEnabled bool `json:"hmac_enabled" msg:"hmac_enabled"` - EnableHTTPSignatureValidation bool `json:"enable_http_signature_validation" msg:"enable_http_signature_validation"` - HmacSecret string `json:"hmac_string" msg:"hmac_string"` - RSACertificateId string `json:"rsa_certificate_id" msg:"rsa_certificate_id"` - IsInactive bool `json:"is_inactive" msg:"is_inactive"` - ApplyPolicyID string `json:"apply_policy_id" msg:"apply_policy_id"` - ApplyPolicies []string `json:"apply_policies" msg:"apply_policies"` - DataExpires int64 `json:"data_expires" msg:"data_expires"` - Monitor struct { - TriggerLimits 
[]float64 `json:"trigger_limits" msg:"trigger_limits"` - } `json:"monitor" msg:"monitor"` - EnableDetailedRecording bool `json:"enable_detail_recording" msg:"enable_detail_recording"` - MetaData map[string]interface{} `json:"meta_data" msg:"meta_data"` - Tags []string `json:"tags" msg:"tags"` - Alias string `json:"alias" msg:"alias"` - LastUpdated string `json:"last_updated" msg:"last_updated"` - IdExtractorDeadline int64 `json:"id_extractor_deadline" msg:"id_extractor_deadline"` - SessionLifetime int64 `bson:"session_lifetime" json:"session_lifetime"` - - // Used to store token hash - keyHash string -} - -func (s *SessionState) MD5Hash() string { - return fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%+v", s)))) -} - -func (s *SessionState) KeyHash() string { - if s.keyHash == "" { - panic("KeyHash cache not found. You should call `SetKeyHash` before.") - } - - return s.keyHash -} - -func (s *SessionState) SetKeyHash(hash string) { - s.keyHash = hash -} - -func (s *SessionState) KeyHashEmpty() bool { - return s.keyHash == "" -} - -func (s *SessionState) Lifetime(fallback int64) int64 { - if config.Global().ForceGlobalSessionLifetime { - return config.Global().GlobalSessionLifetime - } - if s.SessionLifetime > 0 { - return s.SessionLifetime - } - if fallback > 0 { - return fallback - } - return 0 -} - -// PolicyIDs returns the IDs of all the policies applied to this -// session. For backwards compatibility reasons, this falls back to -// ApplyPolicyID if ApplyPolicies is empty. -func (s *SessionState) PolicyIDs() []string { - if len(s.ApplyPolicies) > 0 { - return s.ApplyPolicies - } - if s.ApplyPolicyID != "" { - return []string{s.ApplyPolicyID} - } - return nil -} - -func (s *SessionState) SetPolicies(ids ...string) { - s.ApplyPolicyID = "" - s.ApplyPolicies = ids -} - -// PoliciesEqualTo compares and returns true if passed slice if IDs contains only current ApplyPolicies -func (s *SessionState) PoliciesEqualTo(ids []string) bool { - if len(s.ApplyPolicies) != len(ids) { - return false - } - - polIDMap := make(map[string]bool, len(ids)) - for _, id := range ids { - polIDMap[id] = true - } - - for _, curID := range s.ApplyPolicies { - if !polIDMap[curID] { - return false - } - } - - return true -} - -// GetQuotaLimitByAPIID return quota max, quota remaining, quota renewal rate and quota renews for the given session -func (s *SessionState) GetQuotaLimitByAPIID(apiID string) (int64, int64, int64, int64) { - if access, ok := s.AccessRights[apiID]; ok && access.Limit != nil { - return access.Limit.QuotaMax, - access.Limit.QuotaRemaining, - access.Limit.QuotaRenewalRate, - access.Limit.QuotaRenews - } - - return s.QuotaMax, s.QuotaRemaining, s.QuotaRenewalRate, s.QuotaRenews -} diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore deleted file mode 100644 index 4473da19..00000000 --- a/vendor/github.com/agext/levenshtein/.gitignore +++ /dev/null @@ -1,53 +0,0 @@ -# Ignore docs files -_gh_pages -_site - -# Ignore temporary files -README.html -coverage.out -.tmp - -# Numerous always-ignore extensions -*.diff -*.err -*.log -*.orig -*.rej -*.swo -*.swp -*.vi -*.zip -*~ - -# OS or Editor folders -._* -.cache -.DS_Store -.idea -.project -.settings -.tmproj -*.esproj -*.sublime-project -*.sublime-workspace -nbproject -Thumbs.db - -# Komodo -.komodotools -*.komodoproject - -# SCSS-Lint -scss-lint-report.xml - -# grunt-contrib-sass cache -.sass-cache - -# Jekyll metadata -docs/.jekyll-metadata - -# Folders to ignore -.build -.test -bower_components 
-node_modules diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml deleted file mode 100644 index a51a1446..00000000 --- a/vendor/github.com/agext/levenshtein/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -sudo: false -matrix: - fast_finish: true - include: - - go: 1.11.x - env: TEST_METHOD=goveralls - - go: 1.10.x - - go: tip - - go: 1.9.x - - go: 1.8.x - - go: 1.7.x - - go: 1.6.x - - go: 1.5.x - allow_failures: - - go: tip - - go: 1.9.x - - go: 1.8.x - - go: 1.7.x - - go: 1.6.x - - go: 1.5.x -script: ./test.sh $TEST_METHOD -notifications: - email: - on_success: never diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO deleted file mode 100644 index 716561d5..00000000 --- a/vendor/github.com/agext/levenshtein/DCO +++ /dev/null @@ -1,36 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/agext/levenshtein/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS deleted file mode 100644 index 726c2afb..00000000 --- a/vendor/github.com/agext/levenshtein/MAINTAINERS +++ /dev/null @@ -1 +0,0 @@ -Alex Bucataru (@AlexBucataru) diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE deleted file mode 100644 index eaffaab9..00000000 --- a/vendor/github.com/agext/levenshtein/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Alrux Go EXTensions (AGExt) - package levenshtein -Copyright 2016 ALRUX Inc. - -This product includes software developed at ALRUX Inc. -(http://www.alrux.com/). diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md deleted file mode 100644 index 9e425587..00000000 --- a/vendor/github.com/agext/levenshtein/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# A Go package for calculating the Levenshtein distance between two strings - -[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)  -[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein) -[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein) -[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein) - - -This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org). - -## Project Status - -v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. - -This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. - -## Overview - -The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. - -A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. - -The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. 
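For context on what is being dropped here: the README above documents `Distance` and `Similarity` from github.com/agext/levenshtein, where a nil *Params selects the default cost of 1 for insertions, deletions and substitutions. A minimal usage sketch based only on that documented API (assuming the module is still fetched from the same import path as the removed vendor copy) might look like this:

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// nil Params: all three edit operations cost 1, no maximum cost.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into the 0..1 range (here 1 - 3/7).
	fmt.Println(levenshtein.Similarity("kitten", "sitting", nil))
}
```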
- -The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. - -The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. - -## Installation - -``` -go get github.com/agext/levenshtein -``` - -## License - -Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/agext/levenshtein/go.mod b/vendor/github.com/agext/levenshtein/go.mod deleted file mode 100644 index 545d432b..00000000 --- a/vendor/github.com/agext/levenshtein/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/agext/levenshtein diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go deleted file mode 100644 index df69ce70..00000000 --- a/vendor/github.com/agext/levenshtein/levenshtein.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2016 ALRUX Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. - -The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. - -A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. - -The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. - -The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". 
It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. - -The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. -*/ -package levenshtein - -// Calculate determines the Levenshtein distance between two strings, using -// the given costs for each edit operation. It returns the distance along with -// the lengths of the longest common prefix and suffix. -// -// If maxCost is non-zero, the calculation stops as soon as the distance is determined -// to be greater than maxCost. Therefore, any return value higher than maxCost is a -// lower bound for the actual distance. -func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { - l1, l2 := len(str1), len(str2) - // trim common prefix, if any, as it doesn't affect the distance - for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { - if str1[prefixLen] != str2[prefixLen] { - break - } - } - str1, str2 = str1[prefixLen:], str2[prefixLen:] - l1 -= prefixLen - l2 -= prefixLen - // trim common suffix, if any, as it doesn't affect the distance - for 0 < l1 && 0 < l2 { - if str1[l1-1] != str2[l2-1] { - str1, str2 = str1[:l1], str2[:l2] - break - } - l1-- - l2-- - suffixLen++ - } - // if the first string is empty, the distance is the length of the second string times the cost of insertion - if l1 == 0 { - dist = l2 * insCost - return - } - // if the second string is empty, the distance is the length of the first string times the cost of deletion - if l2 == 0 { - dist = l1 * delCost - return - } - - // variables used in inner "for" loops - var y, dy, c, l int - - // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' - if maxCost > 0 { - if subCost < delCost+insCost { - if maxCost >= l1*subCost+(l2-l1)*insCost { - maxCost = 0 - } - } else { - if maxCost >= l1*delCost+l2*insCost { - maxCost = 0 - } - } - } - - if maxCost > 0 { - // prefer the longer string first, to minimize time; - // a swap also transposes the meanings of insertion and deletion. - if l1 < l2 { - str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost - } - - // the length differential times cost of deletion is a lower bound for the cost; - // if it is higher than the maxCost, there is no point going into the main calculation. 
- if dist = (l1 - l2) * delCost; dist > maxCost { - return - } - - d := make([]int, l1+1) - - // offset and length of d in the current row - doff, dlen := 0, 1 - for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { - d[y] = dy - y++ - dy = y * delCost - } - // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) - - for x := 0; x < l2; x++ { - dy, d[doff] = d[doff], d[doff]+insCost - for d[doff] > maxCost && dlen > 0 { - if str1[doff] != str2[x] { - dy += subCost - } - doff++ - dlen-- - if c = d[doff] + insCost; c < dy { - dy = c - } - dy, d[doff] = d[doff], dy - } - for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { - if str1[y] != str2[x] { - dy += subCost - } - if c = d[y] + delCost; c < dy { - dy = c - } - y++ - if c = d[y] + insCost; c < dy { - dy = c - } - } - if y < l1 { - if str1[y] != str2[x] { - dy += subCost - } - if c = d[y] + delCost; c < dy { - dy = c - } - for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { - y++ - dlen++ - } - } - // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) - if dlen == 0 { - dist = maxCost + 1 - return - } - } - if doff+dlen-1 < l1 { - dist = maxCost + 1 - return - } - dist = d[l1] - } else { - // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is - // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space - // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html - - // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; - // a swap also transposes the meanings of insertion and deletion. - if l1 > l2 { - str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost - } - d := make([]int, l1+1) - - for y = 1; y <= l1; y++ { - d[y] = y * delCost - } - for x := 0; x < l2; x++ { - dy, d[0] = d[0], d[0]+insCost - for y = 0; y < l1; dy, d[y] = d[y], dy { - if str1[y] != str2[x] { - dy += subCost - } - if c = d[y] + delCost; c < dy { - dy = c - } - y++ - if c = d[y] + insCost; c < dy { - dy = c - } - } - } - dist = d[l1] - } - - return -} - -// Distance returns the Levenshtein distance between str1 and str2, using the -// default or provided cost values. Pass nil for the third argument to use the -// default cost of 1 for all three operations, with no maximum. -func Distance(str1, str2 string, p *Params) int { - if p == nil { - p = defaultParams - } - dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) - return dist -} - -// Similarity returns a score in the range of 0..1 for how similar the two strings are. -// A score of 1 means the strings are identical, and 0 means they have nothing in common. -// -// A nil third argument uses the default cost of 1 for all three operations. -// -// If a non-zero MinScore value is provided in the parameters, scores lower than it -// will be returned as 0. -func Similarity(str1, str2 string, p *Params) float64 { - return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus -} - -// Match returns a similarity score adjusted by the same method as proposed by Winkler for -// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their -// similarity score is already over a threshold. -// -// The score is in the range of 0..1, with 1 meaning the strings are identical, -// and 0 meaning they have nothing in common. 
-// -// A nil third argument uses the default cost of 1 for all three operations, maximum length of -// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. -// -// If a non-zero MinScore value is provided in the parameters, scores lower than it -// will be returned as 0. -func Match(str1, str2 string, p *Params) float64 { - s1, s2 := []rune(str1), []rune(str2) - l1, l2 := len(s1), len(s2) - // two empty strings are identical; shortcut also avoids divByZero issues later on. - if l1 == 0 && l2 == 0 { - return 1 - } - - if p == nil { - p = defaultParams - } - - // a min over 1 can never be satisfied, so the score is 0. - if p.minScore > 1 { - return 0 - } - - insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 - if l1 > l2 { - l1, l2, insCost, delCost = l2, l1, delCost, insCost - } - - if p.subCost < delCost+insCost { - maxDist = l1*p.subCost + (l2-l1)*insCost - } else { - maxDist = l1*delCost + l2*insCost - } - - // a zero min is always satisfied, so no need to set a max cost. - if p.minScore > 0 { - // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula - // for the max cost, because a sim score below min cannot receive a bonus. - if p.minScore < p.bonusThreshold { - // round down the max - a cost equal to a rounded up max would already be under min. - max = int((1 - p.minScore) * float64(maxDist)) - } else { - // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) - // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) - // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist - // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist - // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist - max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) - } - } - - dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) - if max > 0 && dist > max { - return 0 - } - sim := 1 - float64(dist)/float64(maxDist) - - if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { - if pl > p.bonusPrefix { - pl = p.bonusPrefix - } - sim += float64(pl) * p.bonusScale * (1 - sim) - } - - if sim < p.minScore { - return 0 - } - - return sim -} diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go deleted file mode 100644 index a85727b3..00000000 --- a/vendor/github.com/agext/levenshtein/params.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2016 ALRUX Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package levenshtein - -// Params represents a set of parameter values for the various formulas involved -// in the calculation of the Levenshtein string metrics. 
-type Params struct { - insCost int - subCost int - delCost int - maxCost int - minScore float64 - bonusPrefix int - bonusScale float64 - bonusThreshold float64 -} - -var ( - defaultParams = NewParams() -) - -// NewParams creates a new set of parameters and initializes it with the default values. -func NewParams() *Params { - return &Params{ - insCost: 1, - subCost: 1, - delCost: 1, - maxCost: 0, - minScore: 0, - bonusPrefix: 4, - bonusScale: .1, - bonusThreshold: .7, - } -} - -// Clone returns a pointer to a copy of the receiver parameter set, or of a new -// default parameter set if the receiver is nil. -func (p *Params) Clone() *Params { - if p == nil { - return NewParams() - } - return &Params{ - insCost: p.insCost, - subCost: p.subCost, - delCost: p.delCost, - maxCost: p.maxCost, - minScore: p.minScore, - bonusPrefix: p.bonusPrefix, - bonusScale: p.bonusScale, - bonusThreshold: p.bonusThreshold, - } -} - -// InsCost overrides the default value of 1 for the cost of insertion. -// The new value must be zero or positive. -func (p *Params) InsCost(v int) *Params { - if v >= 0 { - p.insCost = v - } - return p -} - -// SubCost overrides the default value of 1 for the cost of substitution. -// The new value must be zero or positive. -func (p *Params) SubCost(v int) *Params { - if v >= 0 { - p.subCost = v - } - return p -} - -// DelCost overrides the default value of 1 for the cost of deletion. -// The new value must be zero or positive. -func (p *Params) DelCost(v int) *Params { - if v >= 0 { - p.delCost = v - } - return p -} - -// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. -// The calculation of Distance() stops when the result is guaranteed to exceed -// this maximum, returning a lower-bound rather than exact value. -// The new value must be zero or positive. -func (p *Params) MaxCost(v int) *Params { - if v >= 0 { - p.maxCost = v - } - return p -} - -// MinScore overrides the default value of 0 for the minimum similarity score. -// Scores below this threshold are returned as 0 by Similarity() and Match(). -// The new value must be zero or positive. Note that a minimum greater than 1 -// can never be satisfied, resulting in a score of 0 for any pair of strings. -func (p *Params) MinScore(v float64) *Params { - if v >= 0 { - p.minScore = v - } - return p -} - -// BonusPrefix overrides the default value for the maximum length of -// common prefix to be considered for bonus by Match(). -// The new value must be zero or positive. -func (p *Params) BonusPrefix(v int) *Params { - if v >= 0 { - p.bonusPrefix = v - } - return p -} - -// BonusScale overrides the default value for the scaling factor used by Match() -// in calculating the bonus. -// The new value must be zero or positive. To guarantee that the similarity score -// remains in the interval 0..1, this scaling factor is not allowed to exceed -// 1 / BonusPrefix. -func (p *Params) BonusScale(v float64) *Params { - if v >= 0 { - p.bonusScale = v - } - - // the bonus cannot exceed (1-sim), or the score may become greater than 1. - if float64(p.bonusPrefix)*p.bonusScale > 1 { - p.bonusScale = 1 / float64(p.bonusPrefix) - } - - return p -} - -// BonusThreshold overrides the default value for the minimum similarity score -// for which Match() can assign a bonus. -// The new value must be zero or positive. Note that a threshold greater than 1 -// effectively makes Match() become the equivalent of Similarity(). 
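The params.go hunk above documents a builder-style API: `NewParams` returns the defaults (costs of 1, bonus prefix 4, bonus scale 0.1, bonus threshold 0.7) and the chained setters override individual values. Purely as an illustrative sketch of how those documented setters combine with the `Match` function removed earlier in this diff:

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	p := levenshtein.NewParams().
		SubCost(2).         // substitutions cost twice as much as insert/delete
		MinScore(0.5).      // scores below 0.5 are reported as 0
		BonusThreshold(0.8) // prefix bonus only for already-close pairs

	// "colour" vs "color" share the prefix "colo", so Match applies the
	// Winkler-style bonus on top of the normalized similarity.
	fmt.Println(levenshtein.Match("colour", "color", p))
}
```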
-func (p *Params) BonusThreshold(v float64) *Params { - if v >= 0 { - p.bonusThreshold = v - } - return p -} diff --git a/vendor/github.com/agext/levenshtein/test.sh b/vendor/github.com/agext/levenshtein/test.sh deleted file mode 100644 index c5ed7246..00000000 --- a/vendor/github.com/agext/levenshtein/test.sh +++ /dev/null @@ -1,10 +0,0 @@ -set -ev - -if [[ "$1" == "goveralls" ]]; then - echo "Testing with goveralls..." - go get github.com/mattn/goveralls - $HOME/gopath/bin/goveralls -service=travis-ci -else - echo "Testing with go test..." - go test -v ./... -fi diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE deleted file mode 100644 index 684b03b4..00000000 --- a/vendor/github.com/apparentlymart/go-textseg/LICENSE +++ /dev/null @@ -1,95 +0,0 @@ -Copyright (c) 2017 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------- - -Unicode table generation programs are under a separate copyright and license: - -Copyright (c) 2014 Couchbase, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the -License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -either express or implied. See the License for the specific language governing permissions -and limitations under the License. - ---------- - -Grapheme break data is provided as part of the Unicode character database, -copright 2016 Unicode, Inc, which is provided with the following license: - -Unicode Data Files include all data files under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. - -Unicode Data Files do not include PDF online code charts under the -directory http://www.unicode.org/Public/. - -Software includes any source code published in the Unicode Standard -or under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. - -NOTICE TO USER: Carefully read the following legal agreement. 
-BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S -DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), -YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE -TERMS AND CONDITIONS OF THIS AGREEMENT. -IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE -THE DATA FILES OR SOFTWARE. - -COPYRIGHT AND PERMISSION NOTICE - -Copyright © 1991-2017 Unicode, Inc. All rights reserved. -Distributed under the Terms of Use in http://www.unicode.org/copyright.html. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Unicode data files and any associated documentation -(the "Data Files") or Unicode software and any associated documentation -(the "Software") to deal in the Data Files or Software -without restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, and/or sell copies of -the Data Files or Software, and to permit persons to whom the Data Files -or Software are furnished to do so, provided that either -(a) this copyright and permission notice appear with all copies -of the Data Files or Software, or -(b) this copyright and permission notice appear in associated -Documentation. - -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT OF THIRD PARTY RIGHTS. -IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS -NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL -DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, -DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THE DATA FILES OR SOFTWARE. - -Except as contained in this notice, the name of a copyright holder -shall not be used in advertising or otherwise to promote the sale, -use or other dealings in these Data Files or Software without prior -written authorization of the copyright holder. diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go deleted file mode 100644 index 5752e9ef..00000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go +++ /dev/null @@ -1,30 +0,0 @@ -package textseg - -import ( - "bufio" - "bytes" -) - -// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of -// all of the recognized tokens in the given buffer. -func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Split(splitFunc) - var ret [][]byte - for scanner.Scan() { - ret = append(ret, scanner.Bytes()) - } - return ret, scanner.Err() -} - -// TokenCount is a utility that uses a bufio.SplitFunc to count the number of -// recognized tokens in the given buffer. 
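The all_tokens.go hunk below removes `AllTokens`, a small helper that drives any bufio.SplitFunc over a buffer and collects the tokens. As an illustration only, it can be exercised with the standard library's bufio.ScanWords (the package's own grapheme-cluster splitter lives in a different file of the removed tree and is assumed, not shown, here):

```go
package main

import (
	"bufio"
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// Split on whitespace-separated words; the same helper works with the
	// package's grapheme-cluster SplitFunc or any other bufio.SplitFunc.
	tokens, err := textseg.AllTokens([]byte("mserv mservctl tyk"), bufio.ScanWords)
	if err != nil {
		panic(err)
	}
	for _, t := range tokens {
		fmt.Printf("%q\n", t)
	}
}
```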
-func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Split(splitFunc) - var ret int - for scanner.Scan() { - ret++ - } - return ret, scanner.Err() -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go deleted file mode 100644 index 81f3a747..00000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go +++ /dev/null @@ -1,7 +0,0 @@ -package textseg - -//go:generate go run make_tables.go -output tables.go -//go:generate go run make_test_tables.go -output tables_test.go -//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl -//go:generate ragel -Z grapheme_clusters.rl -//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go deleted file mode 100644 index 012bc690..00000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go +++ /dev/null @@ -1,5276 +0,0 @@ - -// line 1 "grapheme_clusters.rl" -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT - -// line 13 "grapheme_clusters.go" -var _graphclust_actions []byte = []byte{ - 0, 1, 0, 1, 4, 1, 9, 1, 10, - 1, 11, 1, 12, 1, 13, 1, 14, - 1, 15, 1, 16, 1, 17, 1, 18, - 1, 19, 1, 20, 1, 21, 2, 1, - 7, 2, 1, 8, 2, 2, 3, 2, - 5, 1, 3, 0, 1, 8, 3, 5, - 0, 1, 3, 5, 1, 6, -} - -var _graphclust_key_offsets []int16 = []int16{ - 0, 0, 1, 3, 5, 7, 10, 15, - 17, 20, 28, 31, 33, 35, 37, 67, - 75, 77, 81, 84, 89, 94, 104, 116, - 122, 127, 137, 140, 147, 151, 159, 169, - 173, 181, 183, 191, 194, 196, 201, 203, - 210, 212, 220, 221, 242, 246, 252, 257, - 259, 263, 267, 269, 273, 275, 278, 282, - 284, 291, 293, 297, 301, 305, 307, 309, - 318, 322, 327, 329, 335, 337, 338, 340, - 341, 343, 345, 347, 349, 364, 368, 370, - 372, 377, 381, 385, 387, 389, 393, 397, - 399, 403, 410, 415, 419, 422, 423, 427, - 434, 439, 440, 441, 443, 452, 454, 477, - 481, 483, 487, 491, 492, 496, 500, 503, - 505, 510, 523, 525, 527, 529, 531, 535, - 539, 541, 543, 545, 549, 553, 557, 559, - 561, 563, 565, 566, 568, 574, 580, 586, - 588, 592, 596, 601, 604, 614, 616, 618, - 621, 623, 625, 627, 629, 632, 637, 639, - 642, 650, 653, 655, 657, 659, 690, 698, - 700, 704, 711, 723, 730, 744, 750, 768, - 779, 785, 797, 800, 809, 814, 824, 830, - 844, 850, 862, 874, 878, 880, 886, 888, - 895, 898, 906, 907, 928, 937, 945, 951, - 953, 957, 961, 966, 972, 974, 977, 990, - 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, - 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, - 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, - 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, - 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, - 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, - 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, - 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, - 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, - 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, - 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, - 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, - 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, - 1506, 1508, 1511, 1516, 1518, 1521, 
1529, 1532, - 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, - 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, - 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, - 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, - 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, - 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, - 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, - 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, - 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, - 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, - 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, - 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, - 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, - 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, - 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, - 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, - 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, - 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, - 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, - 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, - 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, - 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, - 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, - 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, - 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, - 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, - 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, - 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, - 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, - 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, - 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, - 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, - 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, - 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, - 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, - 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, - 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, - 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, - 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, - 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, - 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, - 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, - 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, - 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, - 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, - 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, - 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, - 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, - 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, - 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, - 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, - 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, - 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, - 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, - 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, - 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, - 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, - 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, - 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, - 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, - 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, - 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, - 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, - 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, - 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, - 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, - 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, - 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, - 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, - 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, - 4617, 4631, 4637, 4649, 4661, 4665, 
4667, 4673, - 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, - 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, - 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, - 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, - 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, - 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, - 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, - 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, - 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, - 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, - 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, - 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, - 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, - 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, - 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, - 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, - 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, - 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, - 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, - 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, - 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, - 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, - 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, - 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, - 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, - 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, - 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, - 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, - 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, - 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, - 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, - 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, - 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, - 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, - 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, - 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, - 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, - 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, - 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, - 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, - 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, - 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, - 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, - 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, - 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, - 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, - 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, - 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, - 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, - 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, - 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, - 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, - 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, - 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, - 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, - 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, - 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, - 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, - 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, - 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, - 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, - 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, - 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, - 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, - 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, - 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, - 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, - 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, - 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, - 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, - 7645, 7650, 7651, 7652, 7654, 7663, 
7665, 7688, - 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, - 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, - 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, - 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, - 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, - 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, - 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, - 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, - 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, - 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, - 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, - 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, - 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, - 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, - 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, - 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, - 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, - 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, - 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, - 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, - 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, - 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, - 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, - 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, - 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, - 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, - 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, - 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, - 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, - 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060, - 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, - 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, - 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, - 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, - 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, - 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, - 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, - 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, - 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, - 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, - 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, - 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, - 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, - 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, - 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, - 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, - 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, - 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, - 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, - 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, - 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, - 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, - 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, - 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, - 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, - 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, - 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, - 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, - 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, - 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, - 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, - 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, - 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, - 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, - 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, - 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, - 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, - 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, - 10890, 10911, 10932, 
10952, 10972, 10992, 11012, 11032, - 11052, 11072, 11092, 11112, -} - -var _graphclust_trans_keys []byte = []byte{ - 10, 128, 255, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 171, 179, 160, 161, 163, - 164, 165, 167, 169, 171, 173, 174, 175, - 176, 177, 179, 180, 181, 182, 183, 184, - 185, 186, 187, 188, 189, 190, 191, 166, - 170, 172, 178, 150, 153, 155, 163, 165, - 167, 169, 173, 153, 155, 148, 161, 163, - 255, 189, 132, 185, 144, 152, 161, 164, - 255, 188, 129, 131, 190, 255, 133, 134, - 137, 138, 142, 150, 152, 161, 164, 255, - 131, 134, 137, 138, 142, 144, 146, 175, - 178, 180, 182, 255, 134, 138, 142, 161, - 164, 255, 188, 129, 131, 190, 191, 128, - 132, 135, 136, 139, 141, 150, 151, 162, - 163, 130, 190, 191, 151, 128, 130, 134, - 136, 138, 141, 128, 131, 190, 255, 133, - 137, 142, 148, 151, 161, 164, 255, 128, - 132, 134, 136, 138, 141, 149, 150, 162, - 163, 129, 131, 190, 255, 133, 137, 142, - 150, 152, 161, 164, 255, 130, 131, 138, - 150, 143, 148, 152, 159, 178, 179, 177, - 179, 186, 135, 142, 177, 179, 185, 187, - 188, 136, 141, 181, 183, 185, 152, 153, - 190, 191, 177, 191, 128, 132, 134, 135, - 141, 151, 153, 188, 134, 128, 129, 130, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 173, 183, 185, 190, 150, 153, - 158, 160, 177, 180, 130, 141, 157, 132, - 134, 157, 159, 146, 148, 178, 180, 146, - 147, 178, 179, 180, 255, 148, 156, 158, - 255, 139, 141, 169, 133, 134, 160, 171, - 176, 187, 151, 155, 160, 162, 191, 149, - 158, 165, 188, 176, 190, 128, 132, 180, - 255, 133, 170, 180, 255, 128, 130, 161, - 173, 166, 179, 164, 183, 173, 144, 146, - 148, 168, 178, 180, 184, 185, 128, 181, - 187, 191, 128, 131, 179, 181, 183, 140, - 141, 128, 131, 157, 179, 181, 183, 144, - 176, 164, 175, 177, 191, 160, 191, 128, - 130, 170, 175, 153, 154, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 175, 178, 180, 189, - 158, 159, 176, 177, 130, 134, 139, 163, - 167, 128, 129, 180, 255, 134, 159, 178, - 255, 166, 173, 135, 147, 128, 131, 179, - 255, 129, 164, 166, 255, 169, 182, 131, - 188, 140, 141, 176, 178, 180, 183, 184, - 190, 191, 129, 171, 175, 181, 182, 163, - 170, 172, 173, 172, 184, 190, 158, 128, - 143, 160, 175, 144, 145, 150, 155, 157, - 158, 159, 135, 139, 141, 168, 171, 189, - 160, 182, 186, 191, 129, 131, 133, 134, - 140, 143, 184, 186, 165, 166, 128, 129, - 130, 132, 133, 134, 135, 136, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 156, 176, 178, 128, 130, 184, - 255, 135, 190, 131, 175, 187, 255, 128, - 130, 167, 180, 179, 128, 130, 179, 255, - 129, 137, 141, 255, 190, 172, 183, 159, - 170, 188, 128, 131, 190, 191, 151, 128, - 132, 135, 136, 139, 141, 162, 163, 166, - 172, 176, 180, 181, 191, 128, 134, 176, - 255, 132, 255, 175, 181, 184, 255, 129, - 155, 158, 255, 129, 255, 171, 183, 157, - 171, 175, 182, 184, 191, 146, 167, 169, - 182, 171, 172, 189, 190, 176, 180, 176, - 182, 145, 190, 143, 146, 178, 157, 158, - 133, 134, 137, 168, 169, 170, 165, 169, - 173, 178, 187, 255, 131, 132, 140, 169, - 174, 255, 130, 132, 128, 182, 187, 255, - 173, 180, 182, 255, 132, 155, 159, 161, - 175, 128, 163, 165, 128, 134, 136, 152, - 155, 161, 163, 164, 166, 170, 144, 150, - 132, 138, 145, 146, 151, 166, 169, 0, - 127, 176, 255, 131, 137, 191, 145, 189, - 135, 129, 130, 132, 133, 144, 154, 176, - 139, 159, 150, 156, 159, 164, 167, 168, - 170, 173, 145, 176, 255, 139, 255, 
166, - 176, 171, 179, 160, 161, 163, 164, 165, - 166, 167, 169, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 168, 170, 150, 153, 155, 163, 165, 167, - 169, 173, 153, 155, 148, 161, 163, 255, - 131, 187, 189, 132, 185, 190, 255, 141, - 144, 129, 136, 145, 151, 152, 161, 162, - 163, 164, 255, 129, 188, 190, 130, 131, - 191, 255, 141, 151, 129, 132, 133, 134, - 137, 138, 142, 161, 162, 163, 164, 255, - 131, 188, 129, 130, 190, 255, 145, 181, - 129, 130, 131, 134, 135, 136, 137, 138, - 139, 141, 142, 175, 176, 177, 178, 255, - 134, 138, 141, 129, 136, 142, 161, 162, - 163, 164, 255, 129, 188, 130, 131, 190, - 191, 128, 141, 129, 132, 135, 136, 139, - 140, 150, 151, 162, 163, 130, 190, 191, - 128, 141, 151, 129, 130, 134, 136, 138, - 140, 128, 129, 131, 190, 255, 133, 137, - 129, 132, 142, 148, 151, 161, 164, 255, - 129, 188, 190, 191, 130, 131, 130, 134, - 128, 132, 135, 136, 138, 139, 140, 141, - 149, 150, 162, 163, 129, 190, 130, 131, - 191, 255, 133, 137, 141, 151, 129, 132, - 142, 161, 162, 163, 164, 255, 138, 143, - 150, 159, 144, 145, 146, 148, 152, 158, - 178, 179, 177, 179, 180, 186, 135, 142, - 177, 179, 180, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 191, - 177, 190, 128, 132, 134, 135, 141, 151, - 153, 188, 134, 128, 129, 130, 141, 156, - 157, 158, 159, 160, 162, 164, 168, 169, - 170, 172, 173, 174, 175, 176, 179, 183, - 177, 173, 183, 185, 186, 187, 188, 189, - 190, 150, 151, 152, 153, 158, 160, 177, - 180, 130, 132, 141, 157, 133, 134, 157, - 159, 146, 148, 178, 180, 146, 147, 178, - 179, 182, 180, 189, 190, 255, 134, 157, - 137, 147, 148, 255, 139, 141, 169, 133, - 134, 178, 160, 162, 163, 166, 167, 168, - 169, 171, 176, 184, 185, 187, 155, 151, - 152, 153, 154, 150, 160, 162, 191, 149, - 151, 152, 158, 165, 172, 173, 178, 179, - 188, 176, 190, 132, 181, 187, 128, 131, - 180, 188, 189, 255, 130, 133, 170, 171, - 179, 180, 255, 130, 161, 170, 128, 129, - 162, 165, 166, 167, 168, 173, 167, 173, - 166, 169, 170, 174, 175, 177, 178, 179, - 164, 171, 172, 179, 180, 181, 182, 183, - 161, 173, 180, 144, 146, 148, 168, 178, - 179, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 144, 176, - 175, 177, 191, 160, 191, 128, 130, 170, - 175, 153, 154, 153, 154, 155, 160, 162, - 163, 164, 165, 166, 167, 168, 169, 170, - 171, 175, 175, 178, 180, 189, 158, 159, - 176, 177, 130, 134, 139, 167, 163, 164, - 165, 166, 132, 133, 134, 159, 160, 177, - 178, 255, 166, 173, 135, 145, 146, 147, - 131, 179, 188, 128, 130, 180, 181, 182, - 185, 186, 255, 165, 129, 255, 169, 174, - 175, 176, 177, 178, 179, 180, 181, 182, - 131, 140, 141, 188, 176, 178, 180, 183, - 184, 190, 191, 129, 171, 181, 182, 172, - 173, 174, 175, 165, 168, 172, 173, 163, - 170, 172, 184, 190, 158, 128, 143, 160, - 175, 144, 145, 150, 155, 157, 158, 159, - 135, 139, 141, 168, 171, 189, 160, 182, - 186, 191, 129, 131, 133, 134, 140, 143, - 184, 186, 165, 166, 128, 129, 130, 132, - 133, 134, 135, 136, 139, 140, 141, 144, - 145, 146, 147, 150, 151, 152, 153, 154, - 156, 176, 178, 129, 128, 130, 184, 255, - 135, 190, 130, 131, 175, 176, 178, 183, - 184, 187, 255, 172, 128, 130, 167, 180, - 179, 130, 128, 129, 179, 181, 182, 190, - 191, 255, 129, 137, 138, 140, 141, 255, - 180, 190, 172, 174, 175, 177, 178, 181, - 182, 183, 159, 160, 162, 163, 170, 188, - 190, 191, 128, 129, 130, 131, 128, 151, - 129, 132, 135, 136, 139, 141, 162, 163, - 166, 172, 176, 180, 181, 183, 184, 191, - 133, 128, 129, 130, 134, 176, 185, 189, - 177, 178, 179, 186, 
187, 190, 191, 255, - 129, 132, 255, 175, 190, 176, 177, 178, - 181, 184, 187, 188, 255, 129, 155, 158, - 255, 189, 176, 178, 179, 186, 187, 190, - 191, 255, 129, 255, 172, 182, 171, 173, - 174, 175, 176, 183, 166, 157, 159, 160, - 161, 162, 171, 175, 190, 176, 182, 184, - 191, 169, 177, 180, 146, 167, 170, 182, - 171, 172, 189, 190, 176, 180, 176, 182, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 166, 173, 165, 169, 174, - 178, 187, 255, 131, 132, 140, 169, 174, - 255, 130, 132, 128, 182, 187, 255, 173, - 180, 182, 255, 132, 155, 159, 161, 175, - 128, 163, 165, 128, 134, 136, 152, 155, - 161, 163, 164, 166, 170, 144, 150, 132, - 138, 143, 187, 191, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 139, - 168, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 144, 145, 150, 155, - 157, 158, 128, 191, 173, 128, 159, 160, - 191, 156, 128, 133, 134, 191, 0, 127, - 176, 255, 131, 137, 191, 145, 189, 135, - 129, 130, 132, 133, 144, 154, 176, 139, - 159, 150, 156, 159, 164, 167, 168, 170, - 173, 145, 176, 255, 139, 255, 166, 176, - 171, 179, 160, 161, 163, 164, 165, 167, - 169, 171, 173, 174, 175, 176, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 166, 170, 172, 178, - 150, 153, 155, 163, 165, 167, 169, 173, - 153, 155, 148, 161, 163, 255, 189, 132, - 185, 144, 152, 161, 164, 255, 188, 129, - 131, 190, 255, 133, 134, 137, 138, 142, - 150, 152, 161, 164, 255, 131, 134, 137, - 138, 142, 144, 146, 175, 178, 180, 182, - 255, 134, 138, 142, 161, 164, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 131, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 129, 131, - 190, 255, 133, 137, 142, 150, 152, 161, - 164, 255, 130, 131, 138, 150, 143, 148, - 152, 159, 178, 179, 177, 179, 186, 135, - 142, 177, 179, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 144, 146, 148, 168, 178, - 180, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 128, 131, - 157, 179, 181, 183, 144, 176, 164, 175, - 177, 191, 160, 191, 128, 130, 170, 175, - 153, 154, 153, 154, 155, 160, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 175, 175, 178, 180, 189, 158, 159, 176, - 177, 130, 134, 139, 163, 167, 128, 129, - 180, 255, 134, 159, 178, 255, 166, 173, - 135, 147, 128, 131, 179, 255, 129, 164, - 166, 255, 169, 182, 131, 188, 140, 141, - 176, 178, 180, 183, 184, 190, 191, 129, - 171, 175, 181, 182, 163, 170, 172, 173, - 172, 184, 190, 158, 128, 143, 160, 175, - 144, 145, 150, 155, 157, 158, 159, 135, - 139, 141, 168, 171, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 128, 129, 130, 132, 133, - 134, 135, 136, 139, 140, 141, 144, 145, - 146, 147, 150, 151, 152, 153, 154, 156, - 176, 178, 128, 130, 184, 255, 135, 190, - 131, 175, 187, 255, 128, 130, 167, 180, - 179, 128, 130, 179, 255, 129, 137, 141, - 255, 190, 172, 183, 159, 170, 188, 128, - 131, 
190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 128, 134, 176, 255, 132, 255, - 175, 181, 184, 255, 129, 155, 158, 255, - 129, 255, 171, 183, 157, 171, 175, 182, - 184, 191, 146, 167, 169, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 145, 190, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 145, - 146, 151, 166, 169, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 141, 144, 129, 136, - 145, 151, 152, 161, 162, 163, 164, 255, - 129, 188, 190, 130, 131, 191, 255, 141, - 151, 129, 132, 133, 134, 137, 138, 142, - 161, 162, 163, 164, 255, 131, 188, 129, - 130, 190, 255, 145, 181, 129, 130, 131, - 134, 135, 136, 137, 138, 139, 141, 142, - 175, 176, 177, 178, 255, 134, 138, 141, - 129, 136, 142, 161, 162, 163, 164, 255, - 129, 188, 130, 131, 190, 191, 128, 141, - 129, 132, 135, 136, 139, 140, 150, 151, - 162, 163, 130, 190, 191, 128, 141, 151, - 129, 130, 134, 136, 138, 140, 128, 129, - 131, 190, 255, 133, 137, 129, 132, 142, - 148, 151, 161, 164, 255, 129, 188, 190, - 191, 130, 131, 130, 134, 128, 132, 135, - 136, 138, 139, 140, 141, 149, 150, 162, - 163, 129, 190, 130, 131, 191, 255, 133, - 137, 141, 151, 129, 132, 142, 161, 162, - 163, 164, 255, 138, 143, 150, 159, 144, - 145, 146, 148, 152, 158, 178, 179, 177, - 179, 180, 186, 135, 142, 177, 179, 180, - 185, 187, 188, 136, 141, 181, 183, 185, - 152, 153, 190, 191, 191, 177, 190, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 177, 173, 183, - 185, 186, 187, 188, 189, 190, 150, 151, - 152, 153, 158, 160, 177, 180, 130, 132, - 141, 157, 133, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 182, 180, - 189, 190, 255, 134, 157, 137, 147, 148, - 255, 139, 141, 169, 133, 134, 178, 160, - 162, 163, 166, 167, 168, 169, 171, 176, - 184, 185, 187, 155, 151, 152, 153, 154, - 150, 160, 162, 191, 149, 151, 152, 158, - 165, 172, 173, 178, 179, 188, 176, 190, - 132, 181, 187, 128, 131, 180, 188, 189, - 255, 130, 133, 170, 171, 179, 180, 255, - 130, 161, 170, 128, 129, 162, 165, 166, - 167, 168, 173, 167, 173, 166, 169, 170, - 174, 175, 177, 178, 179, 164, 171, 172, - 179, 180, 181, 182, 183, 161, 173, 180, - 144, 146, 148, 168, 178, 179, 184, 185, - 128, 181, 187, 191, 128, 131, 179, 181, - 183, 140, 141, 144, 176, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 167, 163, 164, 165, 166, 132, - 133, 134, 159, 160, 177, 178, 255, 166, - 173, 135, 145, 146, 147, 131, 179, 188, - 128, 130, 180, 181, 182, 185, 186, 255, - 165, 129, 255, 169, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 131, 140, 141, - 188, 176, 178, 180, 183, 184, 190, 191, - 129, 171, 181, 182, 172, 173, 174, 175, - 165, 168, 172, 173, 163, 170, 
172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 129, 128, 130, 184, 255, 135, 190, 130, - 131, 175, 176, 178, 183, 184, 187, 255, - 172, 128, 130, 167, 180, 179, 130, 128, - 129, 179, 181, 182, 190, 191, 255, 129, - 137, 138, 140, 141, 255, 180, 190, 172, - 174, 175, 177, 178, 181, 182, 183, 159, - 160, 162, 163, 170, 188, 190, 191, 128, - 129, 130, 131, 128, 151, 129, 132, 135, - 136, 139, 141, 162, 163, 166, 172, 176, - 180, 181, 183, 184, 191, 133, 128, 129, - 130, 134, 176, 185, 189, 177, 178, 179, - 186, 187, 190, 191, 255, 129, 132, 255, - 175, 190, 176, 177, 178, 181, 184, 187, - 188, 255, 129, 155, 158, 255, 189, 176, - 178, 179, 186, 187, 190, 191, 255, 129, - 255, 172, 182, 171, 173, 174, 175, 176, - 183, 166, 157, 159, 160, 161, 162, 171, - 175, 190, 176, 182, 184, 191, 169, 177, - 180, 146, 167, 170, 182, 171, 172, 189, - 190, 176, 180, 176, 182, 143, 146, 178, - 157, 158, 133, 134, 137, 168, 169, 170, - 166, 173, 165, 169, 174, 178, 187, 255, - 131, 132, 140, 169, 174, 255, 130, 132, - 128, 182, 187, 255, 173, 180, 182, 255, - 132, 155, 159, 161, 175, 128, 163, 165, - 128, 134, 136, 152, 155, 161, 163, 164, - 166, 170, 144, 150, 132, 138, 143, 187, - 191, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 139, 168, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 144, 145, 150, 155, 157, 158, 128, - 191, 160, 172, 174, 191, 128, 133, 134, - 155, 157, 191, 157, 128, 191, 143, 128, - 191, 163, 181, 128, 191, 162, 128, 191, - 142, 128, 191, 132, 133, 134, 135, 160, - 128, 191, 128, 255, 128, 129, 130, 132, - 133, 134, 141, 156, 157, 158, 159, 160, - 162, 164, 168, 169, 170, 172, 173, 174, - 175, 176, 179, 183, 160, 255, 128, 129, - 130, 133, 134, 135, 141, 156, 157, 158, - 159, 160, 162, 164, 168, 169, 170, 172, - 173, 174, 175, 176, 179, 183, 160, 255, - 168, 255, 128, 129, 130, 134, 135, 141, - 156, 157, 158, 159, 160, 162, 164, 168, - 169, 170, 172, 173, 174, 175, 176, 179, - 183, 168, 255, 192, 255, 159, 139, 187, - 158, 159, 176, 255, 135, 138, 139, 187, - 188, 255, 168, 255, 153, 154, 155, 160, - 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 175, 177, 178, 179, 180, 181, - 182, 184, 185, 186, 187, 188, 189, 191, - 176, 190, 192, 255, 135, 147, 160, 188, - 128, 156, 184, 129, 255, 128, 129, 130, - 133, 134, 141, 156, 157, 158, 159, 160, - 162, 164, 168, 169, 170, 172, 173, 174, - 175, 176, 179, 183, 158, 159, 135, 255, - 148, 176, 140, 168, 132, 160, 188, 152, - 180, 144, 172, 136, 164, 192, 255, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 160, 161, 162, 164, 165, 166, - 167, 168, 169, 171, 172, 173, 174, 175, - 176, 178, 179, 180, 181, 182, 183, 185, - 186, 187, 188, 189, 190, 128, 191, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 160, 161, 162, 164, 165, 166, - 167, 168, 169, 171, 172, 173, 174, 175, - 176, 178, 179, 180, 181, 182, 183, 185, - 186, 187, 188, 189, 190, 128, 191, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 128, 156, 160, 255, 136, 164, - 175, 176, 255, 128, 141, 143, 191, 128, - 129, 152, 155, 
156, 130, 191, 140, 141, - 128, 138, 144, 167, 175, 191, 128, 159, - 176, 191, 157, 128, 191, 185, 128, 191, - 128, 137, 138, 141, 142, 191, 128, 191, - 165, 177, 178, 179, 180, 181, 182, 184, - 185, 186, 187, 188, 189, 191, 128, 175, - 176, 190, 192, 255, 128, 159, 160, 188, - 189, 191, 128, 156, 184, 129, 255, 148, - 176, 140, 168, 132, 160, 188, 152, 180, - 144, 172, 136, 164, 192, 255, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 128, 156, 160, 255, 136, 164, 175, - 176, 255, 135, 138, 139, 187, 188, 191, - 192, 255, 187, 191, 128, 190, 128, 190, - 188, 128, 175, 190, 191, 145, 155, 157, - 159, 128, 191, 130, 135, 128, 191, 189, - 128, 191, 128, 129, 130, 131, 132, 191, - 178, 128, 191, 128, 159, 164, 191, 133, - 128, 191, 128, 178, 187, 191, 135, 142, - 143, 145, 146, 149, 150, 153, 154, 155, - 164, 128, 191, 128, 165, 166, 191, 144, - 145, 150, 155, 157, 158, 159, 135, 166, - 191, 133, 128, 191, 128, 130, 131, 132, - 133, 137, 138, 139, 140, 191, 174, 188, - 128, 129, 130, 131, 132, 133, 134, 144, - 145, 165, 166, 169, 170, 175, 176, 184, - 185, 191, 128, 132, 170, 129, 135, 136, - 191, 181, 186, 128, 191, 144, 128, 148, - 149, 150, 151, 191, 128, 132, 133, 135, - 136, 138, 139, 143, 144, 191, 163, 128, - 179, 180, 182, 183, 191, 128, 129, 191, - 166, 176, 191, 128, 151, 152, 158, 159, - 178, 179, 185, 186, 187, 188, 190, 128, - 191, 160, 128, 191, 128, 129, 135, 132, - 134, 128, 175, 157, 128, 191, 143, 128, - 191, 163, 181, 128, 191, 162, 128, 191, - 142, 128, 191, 132, 133, 134, 135, 160, - 128, 191, 0, 127, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 167, 169, 171, - 173, 174, 175, 176, 177, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 166, 170, 172, 178, 150, 153, - 155, 163, 165, 167, 169, 173, 153, 155, - 148, 161, 163, 255, 189, 132, 185, 144, - 152, 161, 164, 255, 188, 129, 131, 190, - 255, 133, 134, 137, 138, 142, 150, 152, - 161, 164, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 255, 188, 129, 131, - 190, 191, 128, 132, 135, 136, 139, 141, - 150, 151, 162, 163, 130, 190, 191, 151, - 128, 130, 134, 136, 138, 141, 128, 131, - 190, 255, 133, 137, 142, 148, 151, 161, - 164, 255, 128, 132, 134, 136, 138, 141, - 149, 150, 162, 163, 129, 131, 190, 255, - 133, 137, 142, 150, 152, 161, 164, 255, - 130, 131, 138, 150, 143, 148, 152, 159, - 178, 179, 177, 179, 186, 135, 142, 177, - 179, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 177, 191, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 173, 183, 185, - 190, 150, 153, 158, 160, 177, 180, 130, - 
141, 157, 132, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 180, 255, - 148, 156, 158, 255, 139, 141, 169, 133, - 134, 160, 171, 176, 187, 151, 155, 160, - 162, 191, 149, 158, 165, 188, 176, 190, - 128, 132, 180, 255, 133, 170, 180, 255, - 128, 130, 161, 173, 166, 179, 164, 183, - 173, 144, 146, 148, 168, 178, 180, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 128, 131, 157, 179, - 181, 183, 144, 176, 164, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 163, 167, 128, 129, 180, 255, - 134, 159, 178, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 128, 130, 184, 255, 135, 190, 131, 175, - 187, 255, 128, 130, 167, 180, 179, 128, - 130, 179, 255, 129, 137, 141, 255, 190, - 172, 183, 159, 170, 188, 128, 131, 190, - 191, 151, 128, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 191, - 128, 134, 176, 255, 132, 255, 175, 181, - 184, 255, 129, 155, 158, 255, 129, 255, - 171, 183, 157, 171, 175, 182, 184, 191, - 146, 167, 169, 182, 171, 172, 189, 190, - 176, 180, 176, 182, 145, 190, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 165, 169, 173, 178, 187, 255, 131, - 132, 140, 169, 174, 255, 130, 132, 128, - 182, 187, 255, 173, 180, 182, 255, 132, - 155, 159, 161, 175, 128, 163, 165, 128, - 134, 136, 152, 155, 161, 163, 164, 166, - 170, 144, 150, 132, 138, 145, 146, 151, - 166, 169, 128, 255, 176, 255, 131, 137, - 191, 145, 189, 135, 129, 130, 132, 133, - 144, 154, 176, 139, 159, 150, 156, 159, - 164, 167, 168, 170, 173, 145, 176, 255, - 139, 255, 166, 176, 171, 179, 160, 161, - 163, 164, 165, 166, 167, 169, 171, 172, - 173, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 183, 184, 185, 186, 187, 188, - 189, 190, 191, 168, 170, 150, 153, 155, - 163, 165, 167, 169, 173, 153, 155, 148, - 161, 163, 255, 131, 187, 189, 132, 185, - 190, 255, 141, 144, 129, 136, 145, 151, - 152, 161, 162, 163, 164, 255, 129, 188, - 190, 130, 131, 191, 255, 141, 151, 129, - 132, 133, 134, 137, 138, 142, 161, 162, - 163, 164, 255, 131, 188, 129, 130, 190, - 255, 145, 181, 129, 130, 131, 134, 135, - 136, 137, 138, 139, 141, 142, 175, 176, - 177, 178, 255, 134, 138, 141, 129, 136, - 142, 161, 162, 163, 164, 255, 129, 188, - 130, 131, 190, 191, 128, 141, 129, 132, - 135, 136, 139, 140, 150, 151, 162, 163, - 130, 190, 191, 128, 141, 151, 129, 130, - 134, 136, 138, 140, 128, 129, 131, 190, - 255, 133, 137, 129, 132, 142, 148, 151, - 161, 164, 255, 129, 188, 190, 191, 130, - 131, 130, 134, 128, 132, 135, 136, 138, - 139, 140, 141, 149, 150, 162, 163, 129, - 190, 130, 131, 191, 255, 133, 137, 141, - 151, 129, 132, 142, 161, 162, 163, 164, - 255, 138, 143, 150, 159, 144, 145, 146, - 148, 152, 158, 178, 179, 177, 179, 180, - 186, 135, 142, 177, 179, 180, 185, 187, - 188, 136, 141, 181, 183, 185, 152, 153, - 190, 191, 191, 177, 190, 128, 132, 134, - 135, 141, 151, 153, 188, 134, 128, 129, - 130, 141, 156, 157, 158, 159, 160, 162, - 164, 168, 169, 170, 172, 173, 174, 175, - 176, 179, 183, 177, 173, 
183, 185, 186, - 187, 188, 189, 190, 150, 151, 152, 153, - 158, 160, 177, 180, 130, 132, 141, 157, - 133, 134, 157, 159, 146, 148, 178, 180, - 146, 147, 178, 179, 182, 180, 189, 190, - 255, 134, 157, 137, 147, 148, 255, 139, - 141, 169, 133, 134, 178, 160, 162, 163, - 166, 167, 168, 169, 171, 176, 184, 185, - 187, 155, 151, 152, 153, 154, 150, 160, - 162, 191, 149, 151, 152, 158, 165, 172, - 173, 178, 179, 188, 176, 190, 132, 181, - 187, 128, 131, 180, 188, 189, 255, 130, - 133, 170, 171, 179, 180, 255, 130, 161, - 170, 128, 129, 162, 165, 166, 167, 168, - 173, 167, 173, 166, 169, 170, 174, 175, - 177, 178, 179, 164, 171, 172, 179, 180, - 181, 182, 183, 161, 173, 180, 144, 146, - 148, 168, 178, 179, 184, 185, 128, 181, - 187, 191, 128, 131, 179, 181, 183, 140, - 141, 144, 176, 175, 177, 191, 160, 191, - 128, 130, 170, 175, 153, 154, 153, 154, - 155, 160, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 175, 175, 178, 180, - 189, 158, 159, 176, 177, 130, 134, 139, - 167, 163, 164, 165, 166, 132, 133, 134, - 159, 160, 177, 178, 255, 166, 173, 135, - 145, 146, 147, 131, 179, 188, 128, 130, - 180, 181, 182, 185, 186, 255, 165, 129, - 255, 169, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 131, 140, 141, 188, 176, - 178, 180, 183, 184, 190, 191, 129, 171, - 181, 182, 172, 173, 174, 175, 165, 168, - 172, 173, 163, 170, 172, 184, 190, 158, - 128, 143, 160, 175, 144, 145, 150, 155, - 157, 158, 159, 135, 139, 141, 168, 171, - 189, 160, 182, 186, 191, 129, 131, 133, - 134, 140, 143, 184, 186, 165, 166, 128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 176, 178, 129, 128, - 130, 184, 255, 135, 190, 130, 131, 175, - 176, 178, 183, 184, 187, 255, 172, 128, - 130, 167, 180, 179, 130, 128, 129, 179, - 181, 182, 190, 191, 255, 129, 137, 138, - 140, 141, 255, 180, 190, 172, 174, 175, - 177, 178, 181, 182, 183, 159, 160, 162, - 163, 170, 188, 190, 191, 128, 129, 130, - 131, 128, 151, 129, 132, 135, 136, 139, - 141, 162, 163, 166, 172, 176, 180, 181, - 183, 184, 191, 133, 128, 129, 130, 134, - 176, 185, 189, 177, 178, 179, 186, 187, - 190, 191, 255, 129, 132, 255, 175, 190, - 176, 177, 178, 181, 184, 187, 188, 255, - 129, 155, 158, 255, 189, 176, 178, 179, - 186, 187, 190, 191, 255, 129, 255, 172, - 182, 171, 173, 174, 175, 176, 183, 166, - 157, 159, 160, 161, 162, 171, 175, 190, - 176, 182, 184, 191, 169, 177, 180, 146, - 167, 170, 182, 171, 172, 189, 190, 176, - 180, 176, 182, 143, 146, 178, 157, 158, - 133, 134, 137, 168, 169, 170, 166, 173, - 165, 169, 174, 178, 187, 255, 131, 132, - 140, 169, 174, 255, 130, 132, 128, 182, - 187, 255, 173, 180, 182, 255, 132, 155, - 159, 161, 175, 128, 163, 165, 128, 134, - 136, 152, 155, 161, 163, 164, 166, 170, - 144, 150, 132, 138, 143, 187, 191, 160, - 128, 129, 132, 135, 133, 134, 160, 255, - 192, 255, 139, 168, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 144, - 145, 150, 155, 157, 158, 128, 129, 130, - 132, 133, 134, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 160, 255, 128, - 129, 130, 133, 134, 135, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 160, - 255, 168, 255, 128, 129, 130, 134, 135, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 168, 255, 192, 255, 159, 139, - 187, 158, 159, 176, 255, 135, 138, 139, - 187, 188, 255, 168, 255, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 177, 178, 179, 180, - 181, 182, 
184, 185, 186, 187, 188, 189, - 191, 176, 190, 192, 255, 135, 147, 160, - 188, 128, 156, 184, 129, 255, 128, 129, - 130, 133, 134, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 158, 159, 135, - 255, 148, 176, 140, 168, 132, 160, 188, - 152, 180, 144, 172, 136, 164, 192, 255, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 160, 161, 162, 164, 165, - 166, 167, 168, 169, 171, 172, 173, 174, - 175, 176, 178, 179, 180, 181, 182, 183, - 185, 186, 187, 188, 189, 190, 128, 191, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 160, 161, 162, 164, 165, - 166, 167, 168, 169, 171, 172, 173, 174, - 175, 176, 178, 179, 180, 181, 182, 183, - 185, 186, 187, 188, 189, 190, 128, 191, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 128, 156, 160, 255, 136, - 164, 175, 176, 255, 142, 128, 191, 128, - 129, 152, 155, 156, 130, 191, 139, 141, - 128, 140, 142, 143, 144, 167, 168, 174, - 175, 191, 128, 255, 176, 255, 131, 137, - 191, 145, 189, 135, 129, 130, 132, 133, - 144, 154, 176, 139, 159, 150, 156, 159, - 164, 167, 168, 170, 173, 145, 176, 255, - 139, 255, 166, 176, 171, 179, 160, 161, - 163, 164, 165, 167, 169, 171, 173, 174, - 175, 176, 177, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 166, 170, 172, 178, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 148, 161, - 163, 255, 189, 132, 185, 144, 152, 161, - 164, 255, 188, 129, 131, 190, 255, 133, - 134, 137, 138, 142, 150, 152, 161, 164, - 255, 131, 134, 137, 138, 142, 144, 146, - 175, 178, 180, 182, 255, 134, 138, 142, - 161, 164, 255, 188, 129, 131, 190, 191, - 128, 132, 135, 136, 139, 141, 150, 151, - 162, 163, 130, 190, 191, 151, 128, 130, - 134, 136, 138, 141, 128, 131, 190, 255, - 133, 137, 142, 148, 151, 161, 164, 255, - 128, 132, 134, 136, 138, 141, 149, 150, - 162, 163, 129, 131, 190, 255, 133, 137, - 142, 150, 152, 161, 164, 255, 130, 131, - 138, 150, 143, 148, 152, 159, 178, 179, - 177, 179, 186, 135, 142, 177, 179, 185, - 187, 188, 136, 141, 181, 183, 185, 152, - 153, 190, 191, 177, 191, 128, 132, 134, - 135, 141, 151, 153, 188, 134, 128, 129, - 130, 141, 156, 157, 158, 159, 160, 162, - 164, 168, 169, 170, 172, 173, 174, 175, - 176, 179, 183, 173, 183, 185, 190, 150, - 153, 158, 160, 177, 180, 130, 141, 157, - 132, 134, 157, 159, 146, 148, 178, 180, - 146, 147, 178, 179, 180, 255, 148, 156, - 158, 255, 139, 141, 169, 133, 134, 160, - 171, 176, 187, 151, 155, 160, 162, 191, - 149, 158, 165, 188, 176, 190, 128, 132, - 180, 255, 133, 170, 180, 255, 128, 130, - 161, 173, 166, 179, 164, 183, 173, 144, - 146, 148, 168, 178, 180, 184, 185, 128, - 181, 187, 191, 128, 131, 179, 181, 183, - 140, 141, 144, 176, 175, 177, 191, 160, - 191, 128, 130, 170, 175, 153, 154, 153, - 154, 155, 160, 162, 163, 164, 165, 166, - 167, 168, 169, 170, 171, 175, 175, 178, - 180, 189, 158, 159, 176, 177, 130, 134, - 139, 163, 167, 128, 129, 180, 255, 134, - 159, 178, 255, 166, 173, 135, 147, 128, - 131, 179, 255, 129, 164, 166, 255, 169, - 182, 131, 188, 140, 141, 176, 178, 180, - 183, 184, 190, 191, 129, 171, 175, 181, - 182, 163, 170, 172, 173, 172, 184, 190, - 158, 128, 143, 160, 175, 144, 145, 150, - 155, 157, 158, 135, 139, 141, 168, 171, - 189, 160, 182, 186, 191, 129, 131, 133, - 134, 140, 143, 184, 186, 165, 166, 
128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 176, 178, 128, 130, - 184, 255, 135, 190, 131, 175, 187, 255, - 128, 130, 167, 180, 179, 128, 130, 179, - 255, 129, 137, 141, 255, 190, 172, 183, - 159, 170, 188, 128, 131, 190, 191, 151, - 128, 132, 135, 136, 139, 141, 162, 163, - 166, 172, 176, 180, 181, 191, 128, 134, - 176, 255, 132, 255, 175, 181, 184, 255, - 129, 155, 158, 255, 129, 255, 171, 183, - 157, 171, 175, 182, 184, 191, 146, 167, - 169, 182, 171, 172, 189, 190, 176, 180, - 176, 182, 145, 190, 143, 146, 178, 157, - 158, 133, 134, 137, 168, 169, 170, 165, - 169, 173, 178, 187, 255, 131, 132, 140, - 169, 174, 255, 130, 132, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 163, 165, 128, 134, 136, - 152, 155, 161, 163, 164, 166, 170, 144, - 150, 132, 138, 160, 128, 129, 132, 135, - 133, 134, 160, 255, 192, 255, 128, 131, - 157, 179, 181, 183, 164, 144, 145, 150, - 155, 157, 158, 159, 145, 146, 151, 166, - 169, 128, 255, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 171, 179, 160, 161, 163, - 164, 165, 166, 167, 169, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 168, 170, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 148, 161, - 163, 255, 131, 187, 189, 132, 185, 190, - 255, 141, 144, 129, 136, 145, 151, 152, - 161, 162, 163, 164, 255, 129, 188, 190, - 130, 131, 191, 255, 141, 151, 129, 132, - 133, 134, 137, 138, 142, 161, 162, 163, - 164, 255, 131, 188, 129, 130, 190, 255, - 145, 181, 129, 130, 131, 134, 135, 136, - 137, 138, 139, 141, 142, 175, 176, 177, - 178, 255, 134, 138, 141, 129, 136, 142, - 161, 162, 163, 164, 255, 129, 188, 130, - 131, 190, 191, 128, 141, 129, 132, 135, - 136, 139, 140, 150, 151, 162, 163, 130, - 190, 191, 128, 141, 151, 129, 130, 134, - 136, 138, 140, 128, 129, 131, 190, 255, - 133, 137, 129, 132, 142, 148, 151, 161, - 164, 255, 129, 188, 190, 191, 130, 131, - 130, 134, 128, 132, 135, 136, 138, 139, - 140, 141, 149, 150, 162, 163, 129, 190, - 130, 131, 191, 255, 133, 137, 141, 151, - 129, 132, 142, 161, 162, 163, 164, 255, - 138, 143, 150, 159, 144, 145, 146, 148, - 152, 158, 178, 179, 177, 179, 180, 186, - 135, 142, 177, 179, 180, 185, 187, 188, - 136, 141, 181, 183, 185, 152, 153, 190, - 191, 191, 177, 190, 128, 132, 134, 135, - 141, 151, 153, 188, 134, 128, 129, 130, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 177, 173, 183, 185, 186, 187, - 188, 189, 190, 150, 151, 152, 153, 158, - 160, 177, 180, 130, 132, 141, 157, 133, - 134, 157, 159, 146, 148, 178, 180, 146, - 147, 178, 179, 182, 180, 189, 190, 255, - 134, 157, 137, 147, 148, 255, 139, 141, - 169, 133, 134, 178, 160, 162, 163, 166, - 167, 168, 169, 171, 176, 184, 185, 187, - 155, 151, 152, 153, 154, 150, 160, 162, - 191, 149, 151, 152, 158, 165, 172, 173, - 178, 179, 188, 176, 190, 132, 181, 187, - 128, 131, 180, 188, 189, 255, 130, 133, - 170, 171, 179, 180, 255, 130, 161, 170, - 128, 129, 162, 165, 166, 167, 168, 173, - 167, 173, 166, 169, 170, 174, 175, 177, - 178, 179, 164, 171, 172, 179, 180, 181, - 182, 183, 161, 173, 180, 144, 146, 148, - 168, 178, 179, 184, 185, 128, 181, 187, - 191, 128, 131, 179, 181, 183, 140, 141, - 144, 176, 175, 177, 191, 160, 191, 128, - 130, 170, 175, 153, 154, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 
175, 178, 180, 189, - 158, 159, 176, 177, 130, 134, 139, 167, - 163, 164, 165, 166, 132, 133, 134, 159, - 160, 177, 178, 255, 166, 173, 135, 145, - 146, 147, 131, 179, 188, 128, 130, 180, - 181, 182, 185, 186, 255, 165, 129, 255, - 169, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 131, 140, 141, 188, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 181, - 182, 172, 173, 174, 175, 165, 168, 172, - 173, 163, 170, 172, 184, 190, 158, 128, - 143, 160, 175, 144, 145, 150, 155, 157, - 158, 159, 135, 139, 141, 168, 171, 189, - 160, 182, 186, 191, 129, 131, 133, 134, - 140, 143, 184, 186, 165, 166, 128, 129, - 130, 132, 133, 134, 135, 136, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 156, 176, 178, 129, 128, 130, - 184, 255, 135, 190, 130, 131, 175, 176, - 178, 183, 184, 187, 255, 172, 128, 130, - 167, 180, 179, 130, 128, 129, 179, 181, - 182, 190, 191, 255, 129, 137, 138, 140, - 141, 255, 180, 190, 172, 174, 175, 177, - 178, 181, 182, 183, 159, 160, 162, 163, - 170, 188, 190, 191, 128, 129, 130, 131, - 128, 151, 129, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 183, - 184, 191, 133, 128, 129, 130, 134, 176, - 185, 189, 177, 178, 179, 186, 187, 190, - 191, 255, 129, 132, 255, 175, 190, 176, - 177, 178, 181, 184, 187, 188, 255, 129, - 155, 158, 255, 189, 176, 178, 179, 186, - 187, 190, 191, 255, 129, 255, 172, 182, - 171, 173, 174, 175, 176, 183, 166, 157, - 159, 160, 161, 162, 171, 175, 190, 176, - 182, 184, 191, 169, 177, 180, 146, 167, - 170, 182, 171, 172, 189, 190, 176, 180, - 176, 182, 143, 146, 178, 157, 158, 133, - 134, 137, 168, 169, 170, 166, 173, 165, - 169, 174, 178, 187, 255, 131, 132, 140, - 169, 174, 255, 130, 132, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 163, 165, 128, 134, 136, - 152, 155, 161, 163, 164, 166, 170, 144, - 150, 132, 138, 143, 187, 191, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 139, 168, 128, 159, 160, 175, 176, - 191, 157, 128, 191, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 167, 169, 171, - 173, 174, 175, 176, 177, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 166, 170, 172, 178, 150, 153, - 155, 163, 165, 167, 169, 173, 153, 155, - 148, 161, 163, 255, 189, 132, 185, 144, - 152, 161, 164, 255, 188, 129, 131, 190, - 255, 133, 134, 137, 138, 142, 150, 152, - 161, 164, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 255, 188, 129, 131, - 190, 191, 128, 132, 135, 136, 139, 141, - 150, 151, 162, 163, 130, 190, 191, 151, - 128, 130, 134, 136, 138, 141, 128, 131, - 190, 255, 133, 137, 142, 148, 151, 161, - 164, 255, 128, 132, 134, 136, 138, 141, - 149, 150, 162, 163, 129, 131, 190, 255, - 133, 137, 142, 150, 152, 161, 164, 255, - 130, 131, 138, 150, 143, 148, 152, 159, - 178, 179, 177, 179, 186, 135, 142, 177, - 179, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 177, 191, 128, - 132, 
134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 173, 183, 185, - 190, 150, 153, 158, 160, 177, 180, 130, - 141, 157, 132, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 180, 255, - 148, 156, 158, 255, 139, 141, 169, 133, - 134, 160, 171, 176, 187, 151, 155, 160, - 162, 191, 149, 158, 165, 188, 176, 190, - 128, 132, 180, 255, 133, 170, 180, 255, - 128, 130, 161, 173, 166, 179, 164, 183, - 173, 144, 146, 148, 168, 178, 180, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 128, 131, 157, 179, - 181, 183, 144, 176, 164, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 163, 167, 128, 129, 180, 255, - 134, 159, 178, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 128, 130, 184, 255, 135, 190, 131, 175, - 187, 255, 128, 130, 167, 180, 179, 128, - 130, 179, 255, 129, 137, 141, 255, 190, - 172, 183, 159, 170, 188, 128, 131, 190, - 191, 151, 128, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 191, - 128, 134, 176, 255, 132, 255, 175, 181, - 184, 255, 129, 155, 158, 255, 129, 255, - 171, 183, 157, 171, 175, 182, 184, 191, - 146, 167, 169, 182, 171, 172, 189, 190, - 176, 180, 176, 182, 145, 190, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 165, 169, 173, 178, 187, 255, 131, - 132, 140, 169, 174, 255, 130, 132, 128, - 182, 187, 255, 173, 180, 182, 255, 132, - 155, 159, 161, 175, 128, 163, 165, 128, - 134, 136, 152, 155, 161, 163, 164, 166, - 170, 144, 150, 132, 138, 145, 146, 151, - 166, 169, 139, 168, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 144, - 145, 150, 155, 157, 158, 141, 144, 129, - 136, 145, 151, 152, 161, 162, 163, 164, - 255, 129, 188, 190, 130, 131, 191, 255, - 141, 151, 129, 132, 133, 134, 137, 138, - 142, 161, 162, 163, 164, 255, 131, 188, - 129, 130, 190, 255, 145, 181, 129, 130, - 131, 134, 135, 136, 137, 138, 139, 141, - 142, 175, 176, 177, 178, 255, 134, 138, - 141, 129, 136, 142, 161, 162, 163, 164, - 255, 129, 188, 130, 131, 190, 191, 128, - 141, 129, 132, 135, 136, 139, 140, 150, - 151, 162, 163, 130, 190, 191, 128, 141, - 151, 129, 130, 134, 136, 138, 140, 128, - 129, 131, 190, 255, 133, 137, 129, 132, - 142, 148, 151, 161, 164, 255, 129, 188, - 190, 191, 130, 131, 130, 134, 128, 132, - 135, 136, 138, 139, 140, 141, 149, 150, - 162, 163, 129, 190, 130, 131, 191, 255, - 133, 137, 141, 151, 129, 132, 142, 161, - 162, 163, 164, 255, 138, 143, 150, 159, - 144, 145, 146, 148, 152, 158, 178, 179, - 177, 179, 180, 186, 135, 142, 177, 179, - 180, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 191, 177, 190, - 128, 132, 134, 135, 141, 151, 153, 188, - 134, 128, 129, 130, 141, 156, 157, 158, - 159, 160, 162, 164, 168, 169, 170, 172, - 173, 174, 175, 176, 179, 183, 177, 173, - 183, 185, 186, 187, 188, 189, 190, 150, - 151, 152, 153, 158, 160, 177, 180, 130, - 132, 141, 157, 133, 134, 157, 159, 146, - 148, 178, 180, 146, 147, 178, 
179, 182, - 180, 189, 190, 255, 134, 157, 137, 147, - 148, 255, 139, 141, 169, 133, 134, 178, - 160, 162, 163, 166, 167, 168, 169, 171, - 176, 184, 185, 187, 155, 151, 152, 153, - 154, 150, 160, 162, 191, 149, 151, 152, - 158, 165, 172, 173, 178, 179, 188, 176, - 190, 132, 181, 187, 128, 131, 180, 188, - 189, 255, 130, 133, 170, 171, 179, 180, - 255, 130, 161, 170, 128, 129, 162, 165, - 166, 167, 168, 173, 167, 173, 166, 169, - 170, 174, 175, 177, 178, 179, 164, 171, - 172, 179, 180, 181, 182, 183, 161, 173, - 180, 144, 146, 148, 168, 178, 179, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 144, 176, 175, 177, - 191, 160, 191, 128, 130, 170, 175, 153, - 154, 153, 154, 155, 160, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 171, 175, - 175, 178, 180, 189, 158, 159, 176, 177, - 130, 134, 139, 167, 163, 164, 165, 166, - 132, 133, 134, 159, 160, 177, 178, 255, - 166, 173, 135, 145, 146, 147, 131, 179, - 188, 128, 130, 180, 181, 182, 185, 186, - 255, 165, 129, 255, 169, 174, 175, 176, - 177, 178, 179, 180, 181, 182, 131, 140, - 141, 188, 176, 178, 180, 183, 184, 190, - 191, 129, 171, 181, 182, 172, 173, 174, - 175, 165, 168, 172, 173, 163, 170, 172, - 184, 190, 158, 128, 143, 160, 175, 144, - 145, 150, 155, 157, 158, 159, 135, 139, - 141, 168, 171, 189, 160, 182, 186, 191, - 129, 131, 133, 134, 140, 143, 184, 186, - 165, 166, 128, 129, 130, 132, 133, 134, - 135, 136, 139, 140, 141, 144, 145, 146, - 147, 150, 151, 152, 153, 154, 156, 176, - 178, 129, 128, 130, 184, 255, 135, 190, - 130, 131, 175, 176, 178, 183, 184, 187, - 255, 172, 128, 130, 167, 180, 179, 130, - 128, 129, 179, 181, 182, 190, 191, 255, - 129, 137, 138, 140, 141, 255, 180, 190, - 172, 174, 175, 177, 178, 181, 182, 183, - 159, 160, 162, 163, 170, 188, 190, 191, - 128, 129, 130, 131, 128, 151, 129, 132, - 135, 136, 139, 141, 162, 163, 166, 172, - 176, 180, 181, 183, 184, 191, 133, 128, - 129, 130, 134, 176, 185, 189, 177, 178, - 179, 186, 187, 190, 191, 255, 129, 132, - 255, 175, 190, 176, 177, 178, 181, 184, - 187, 188, 255, 129, 155, 158, 255, 189, - 176, 178, 179, 186, 187, 190, 191, 255, - 129, 255, 172, 182, 171, 173, 174, 175, - 176, 183, 166, 157, 159, 160, 161, 162, - 171, 175, 190, 176, 182, 184, 191, 169, - 177, 180, 146, 167, 170, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 166, 173, 165, 169, 174, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 143, - 187, 191, 160, 128, 129, 132, 135, 133, - 134, 160, 255, 192, 255, 185, 128, 191, - 128, 137, 138, 141, 142, 191, 128, 191, - 165, 177, 178, 179, 180, 181, 182, 184, - 185, 186, 187, 188, 189, 191, 128, 175, - 176, 190, 192, 255, 128, 159, 160, 188, - 189, 191, 128, 156, 184, 129, 255, 148, - 176, 140, 168, 132, 160, 188, 152, 180, - 144, 172, 136, 164, 192, 255, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 
190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 191, 128, 156, 161, 190, 192, - 255, 136, 164, 175, 176, 255, 135, 138, - 139, 187, 188, 191, 192, 255, 0, 127, - 192, 255, 187, 191, 128, 190, 191, 128, - 190, 188, 128, 175, 176, 189, 190, 191, - 145, 155, 157, 159, 128, 191, 130, 135, - 128, 191, 189, 128, 191, 128, 129, 130, - 131, 132, 191, 178, 128, 191, 128, 159, - 160, 163, 164, 191, 133, 128, 191, 128, - 178, 179, 186, 187, 191, 135, 142, 143, - 145, 146, 149, 150, 153, 154, 155, 164, - 128, 191, 128, 165, 166, 191, 128, 255, - 176, 255, 131, 137, 191, 145, 189, 135, - 129, 130, 132, 133, 144, 154, 176, 139, - 159, 150, 156, 159, 164, 167, 168, 170, - 173, 145, 176, 255, 139, 255, 166, 176, - 171, 179, 160, 161, 163, 164, 165, 167, - 169, 171, 173, 174, 175, 176, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 166, 170, 172, 178, - 150, 153, 155, 163, 165, 167, 169, 173, - 153, 155, 148, 161, 163, 255, 189, 132, - 185, 144, 152, 161, 164, 255, 188, 129, - 131, 190, 255, 133, 134, 137, 138, 142, - 150, 152, 161, 164, 255, 131, 134, 137, - 138, 142, 144, 146, 175, 178, 180, 182, - 255, 134, 138, 142, 161, 164, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 131, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 129, 131, - 190, 255, 133, 137, 142, 150, 152, 161, - 164, 255, 130, 131, 138, 150, 143, 148, - 152, 159, 178, 179, 177, 179, 186, 135, - 142, 177, 179, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 144, 146, 148, 168, 178, - 180, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 128, 131, - 157, 179, 181, 183, 144, 176, 164, 175, - 177, 191, 160, 191, 128, 130, 170, 175, - 153, 154, 153, 154, 155, 160, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 175, 175, 178, 180, 189, 158, 159, 176, - 177, 130, 134, 139, 163, 167, 128, 129, - 180, 255, 134, 159, 178, 255, 166, 173, - 135, 147, 128, 131, 179, 255, 129, 164, - 166, 255, 169, 182, 131, 188, 140, 141, - 176, 178, 180, 183, 184, 190, 191, 129, - 171, 175, 181, 182, 163, 170, 172, 173, - 172, 184, 190, 158, 128, 143, 160, 175, - 144, 145, 150, 155, 157, 158, 159, 135, - 139, 141, 168, 171, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 128, 129, 130, 132, 133, - 134, 135, 136, 139, 140, 141, 144, 145, - 146, 147, 150, 151, 152, 153, 154, 156, - 176, 178, 128, 130, 184, 255, 135, 190, - 131, 175, 187, 255, 128, 130, 167, 180, - 179, 128, 130, 179, 255, 129, 137, 141, - 255, 190, 172, 183, 159, 170, 188, 128, - 131, 190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 128, 134, 176, 255, 132, 255, - 175, 181, 184, 255, 129, 155, 158, 255, - 129, 255, 171, 183, 157, 171, 175, 182, - 184, 191, 146, 167, 169, 182, 171, 172, - 
189, 190, 176, 180, 176, 182, 145, 190, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 145, - 146, 151, 166, 169, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 141, 144, 129, 136, - 145, 151, 152, 161, 162, 163, 164, 255, - 129, 188, 190, 130, 131, 191, 255, 141, - 151, 129, 132, 133, 134, 137, 138, 142, - 161, 162, 163, 164, 255, 131, 188, 129, - 130, 190, 255, 145, 181, 129, 130, 131, - 134, 135, 136, 137, 138, 139, 141, 142, - 175, 176, 177, 178, 255, 134, 138, 141, - 129, 136, 142, 161, 162, 163, 164, 255, - 129, 188, 130, 131, 190, 191, 128, 141, - 129, 132, 135, 136, 139, 140, 150, 151, - 162, 163, 130, 190, 191, 128, 141, 151, - 129, 130, 134, 136, 138, 140, 128, 129, - 131, 190, 255, 133, 137, 129, 132, 142, - 148, 151, 161, 164, 255, 129, 188, 190, - 191, 130, 131, 130, 134, 128, 132, 135, - 136, 138, 139, 140, 141, 149, 150, 162, - 163, 129, 190, 130, 131, 191, 255, 133, - 137, 141, 151, 129, 132, 142, 161, 162, - 163, 164, 255, 138, 143, 150, 159, 144, - 145, 146, 148, 152, 158, 178, 179, 177, - 179, 180, 186, 135, 142, 177, 179, 180, - 185, 187, 188, 136, 141, 181, 183, 185, - 152, 153, 190, 191, 191, 177, 190, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 177, 173, 183, - 185, 186, 187, 188, 189, 190, 150, 151, - 152, 153, 158, 160, 177, 180, 130, 132, - 141, 157, 133, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 182, 180, - 189, 190, 255, 134, 157, 137, 147, 148, - 255, 139, 141, 169, 133, 134, 178, 160, - 162, 163, 166, 167, 168, 169, 171, 176, - 184, 185, 187, 155, 151, 152, 153, 154, - 150, 160, 162, 191, 149, 151, 152, 158, - 165, 172, 173, 178, 179, 188, 176, 190, - 132, 181, 187, 128, 131, 180, 188, 189, - 255, 130, 133, 170, 171, 179, 180, 255, - 130, 161, 170, 128, 129, 162, 165, 166, - 167, 168, 173, 167, 173, 166, 169, 170, - 174, 175, 177, 178, 179, 164, 171, 172, - 179, 180, 181, 182, 183, 161, 173, 180, - 144, 146, 148, 168, 178, 179, 184, 185, - 128, 181, 187, 191, 128, 131, 179, 181, - 183, 140, 141, 144, 176, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 167, 163, 164, 165, 166, 132, - 133, 134, 159, 160, 177, 178, 255, 166, - 173, 135, 145, 146, 147, 131, 179, 188, - 128, 130, 180, 181, 182, 185, 186, 255, - 165, 129, 255, 169, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 131, 140, 141, - 188, 176, 178, 180, 183, 184, 190, 191, - 129, 171, 181, 182, 172, 173, 174, 175, - 165, 168, 172, 173, 163, 170, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 
145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 129, 128, 130, 184, 255, 135, 190, 130, - 131, 175, 176, 178, 183, 184, 187, 255, - 172, 128, 130, 167, 180, 179, 130, 128, - 129, 179, 181, 182, 190, 191, 255, 129, - 137, 138, 140, 141, 255, 180, 190, 172, - 174, 175, 177, 178, 181, 182, 183, 159, - 160, 162, 163, 170, 188, 190, 191, 128, - 129, 130, 131, 128, 151, 129, 132, 135, - 136, 139, 141, 162, 163, 166, 172, 176, - 180, 181, 183, 184, 191, 133, 128, 129, - 130, 134, 176, 185, 189, 177, 178, 179, - 186, 187, 190, 191, 255, 129, 132, 255, - 175, 190, 176, 177, 178, 181, 184, 187, - 188, 255, 129, 155, 158, 255, 189, 176, - 178, 179, 186, 187, 190, 191, 255, 129, - 255, 172, 182, 171, 173, 174, 175, 176, - 183, 166, 157, 159, 160, 161, 162, 171, - 175, 190, 176, 182, 184, 191, 169, 177, - 180, 146, 167, 170, 182, 171, 172, 189, - 190, 176, 180, 176, 182, 143, 146, 178, - 157, 158, 133, 134, 137, 168, 169, 170, - 166, 173, 165, 169, 174, 178, 187, 255, - 131, 132, 140, 169, 174, 255, 130, 132, - 128, 182, 187, 255, 173, 180, 182, 255, - 132, 155, 159, 161, 175, 128, 163, 165, - 128, 134, 136, 152, 155, 161, 163, 164, - 166, 170, 144, 150, 132, 138, 143, 187, - 191, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 139, 168, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 144, 145, 150, 155, 157, 158, 144, - 145, 150, 155, 157, 158, 159, 135, 166, - 191, 133, 128, 191, 128, 130, 131, 132, - 133, 137, 138, 139, 140, 191, 174, 188, - 128, 129, 130, 131, 132, 133, 134, 144, - 145, 165, 166, 169, 170, 175, 176, 184, - 185, 191, 128, 132, 170, 129, 135, 136, - 191, 181, 186, 128, 191, 144, 128, 148, - 149, 150, 151, 191, 128, 132, 133, 135, - 136, 138, 139, 143, 144, 191, 163, 128, - 179, 180, 182, 183, 191, 128, 129, 191, - 166, 176, 191, 128, 151, 152, 158, 159, - 178, 179, 185, 186, 187, 188, 190, 128, - 191, 160, 128, 191, 128, 130, 131, 135, - 191, 129, 134, 136, 190, 128, 159, 160, - 191, 128, 175, 176, 255, 10, 13, 127, - 194, 216, 219, 220, 224, 225, 226, 234, - 235, 236, 237, 239, 240, 243, 0, 31, - 128, 191, 192, 223, 227, 238, 241, 247, - 248, 255, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 239, 240, 243, 204, 205, - 210, 214, 215, 216, 217, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 234, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 239, 240, 243, 194, 216, - 219, 220, 224, 225, 226, 234, 235, 236, - 237, 239, 240, 243, 32, 126, 192, 223, - 227, 238, 241, 247, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 235, 236, 237, 239, 240, 243, 204, - 205, 210, 214, 215, 216, 217, 219, 220, - 221, 222, 223, 224, 225, 226, 227, 234, - 237, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 237, 239, 240, - 243, 204, 205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 237, 239, 240, 243, 204, 205, - 210, 214, 215, 216, 217, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 234, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 235, 236, 237, 239, 240, - 243, 204, 205, 
210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 239, 240, 243, 204, 205, 210, - 214, 215, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 234, 239, 240, - 243, 204, 205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 239, 240, 243, 204, 205, 210, - 214, 215, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 234, 237, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 237, 239, 240, 243, 204, - 205, 210, 214, 215, 216, 217, 219, 220, - 221, 222, 223, 224, 225, 226, 227, 234, - 237, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, -} - -var _graphclust_single_lengths []byte = []byte{ - 0, 1, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 26, 0, - 0, 0, 1, 1, 1, 0, 0, 2, - 1, 0, 1, 1, 0, 2, 0, 0, - 2, 0, 2, 1, 0, 1, 0, 3, - 0, 0, 1, 21, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 1, - 0, 5, 2, 6, 0, 1, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 2, 1, 1, 0, 3, 1, 0, 7, - 5, 1, 1, 0, 1, 0, 23, 0, - 0, 0, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 0, 0, - 0, 0, 1, 0, 6, 0, 0, 0, - 0, 0, 1, 3, 0, 0, 0, 3, - 0, 0, 0, 0, 1, 1, 0, 1, - 0, 1, 0, 0, 0, 29, 0, 0, - 0, 3, 2, 3, 2, 2, 2, 3, - 2, 2, 3, 3, 1, 2, 4, 2, - 2, 4, 4, 2, 0, 2, 0, 3, - 1, 0, 1, 21, 1, 0, 4, 0, - 0, 0, 1, 2, 0, 1, 1, 1, - 4, 0, 3, 1, 3, 2, 0, 3, - 0, 5, 2, 0, 0, 1, 0, 2, - 0, 0, 15, 0, 0, 0, 4, 0, - 0, 0, 3, 1, 0, 4, 1, 4, - 4, 3, 1, 0, 7, 5, 1, 1, - 0, 1, 0, 23, 1, 0, 1, 1, - 1, 1, 0, 2, 1, 3, 2, 0, - 1, 3, 1, 2, 0, 1, 0, 2, - 1, 2, 3, 4, 0, 0, 0, 1, - 0, 6, 2, 0, 0, 0, 0, 1, - 3, 0, 0, 0, 1, 0, 1, 4, - 0, 0, 0, 1, 1, 1, 4, 0, - 0, 0, 6, 0, 1, 1, 0, 0, - 0, 1, 1, 0, 1, 0, 1, 0, - 0, 0, 26, 0, 0, 0, 1, 1, - 1, 0, 0, 2, 1, 0, 1, 1, - 0, 2, 0, 0, 2, 0, 2, 1, - 0, 1, 0, 3, 0, 0, 1, 21, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 1, 0, 5, 2, 6, - 0, 1, 0, 1, 0, 2, 0, 0, - 15, 0, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 0, 2, 1, 1, 0, - 3, 1, 0, 7, 5, 1, 1, 0, - 1, 0, 23, 0, 0, 0, 0, 1, - 0, 0, 1, 0, 1, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 4, 0, 0, 0, 0, 1, 0, - 6, 0, 0, 0, 0, 0, 1, 3, - 0, 0, 0, 3, 0, 0, 0, 0, - 1, 1, 0, 1, 0, 1, 0, 0, - 0, 29, 0, 0, 0, 3, 2, 3, - 2, 2, 2, 3, 2, 2, 3, 3, - 1, 2, 4, 2, 2, 4, 4, 2, - 0, 2, 0, 3, 1, 0, 1, 21, - 1, 0, 4, 0, 0, 0, 1, 2, - 0, 1, 1, 1, 4, 0, 3, 1, - 3, 2, 0, 3, 0, 5, 2, 0, - 0, 1, 0, 2, 0, 0, 15, 0, - 0, 0, 4, 0, 0, 0, 3, 1, - 0, 4, 1, 4, 4, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 1, 0, 1, 1, 1, 1, 0, 2, - 1, 
3, 2, 0, 1, 3, 1, 2, - 0, 1, 0, 2, 1, 2, 3, 4, - 0, 0, 0, 1, 0, 6, 2, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 1, 0, 1, 4, 0, 0, 0, 1, - 1, 1, 4, 0, 0, 0, 6, 0, - 0, 0, 1, 1, 2, 1, 1, 5, - 0, 24, 0, 24, 0, 0, 23, 0, - 0, 1, 0, 2, 0, 0, 0, 28, - 0, 3, 23, 2, 0, 2, 2, 3, - 2, 2, 2, 0, 54, 54, 27, 1, - 0, 5, 2, 0, 1, 1, 0, 0, - 14, 0, 3, 2, 2, 3, 2, 2, - 2, 54, 54, 27, 1, 0, 2, 0, - 1, 4, 2, 1, 0, 1, 0, 1, - 0, 11, 0, 7, 1, 0, 1, 0, - 2, 3, 2, 1, 0, 1, 1, 3, - 0, 1, 3, 0, 1, 1, 2, 1, - 1, 5, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 26, - 0, 0, 0, 1, 1, 1, 0, 0, - 2, 1, 0, 1, 1, 0, 2, 0, - 0, 2, 0, 2, 1, 0, 1, 0, - 3, 0, 0, 1, 21, 0, 0, 3, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 1, 0, 5, 2, 6, 0, 1, 0, - 1, 0, 2, 0, 0, 15, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 0, 2, 1, 1, 0, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 0, 0, 0, 0, 1, 0, 0, 1, - 0, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 0, - 0, 0, 0, 1, 0, 6, 0, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 3, 0, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 29, 0, - 0, 0, 3, 2, 3, 2, 2, 2, - 3, 2, 2, 3, 3, 1, 2, 4, - 2, 2, 4, 4, 2, 0, 2, 0, - 3, 1, 0, 1, 21, 1, 0, 4, - 0, 0, 0, 1, 2, 0, 1, 1, - 1, 4, 0, 3, 1, 3, 2, 0, - 3, 0, 5, 2, 0, 0, 1, 0, - 2, 0, 0, 15, 0, 0, 0, 4, - 0, 0, 0, 3, 1, 0, 4, 1, - 4, 4, 3, 1, 0, 7, 5, 1, - 1, 0, 1, 0, 23, 1, 0, 1, - 1, 1, 1, 0, 2, 1, 3, 2, - 0, 1, 3, 1, 2, 0, 1, 0, - 2, 1, 2, 3, 4, 0, 0, 0, - 1, 0, 6, 2, 0, 0, 0, 0, - 1, 3, 0, 0, 0, 1, 0, 1, - 4, 0, 0, 0, 1, 1, 1, 4, - 0, 0, 0, 6, 24, 0, 24, 0, - 0, 23, 0, 0, 1, 0, 2, 0, - 0, 0, 28, 0, 3, 23, 2, 0, - 2, 2, 3, 2, 2, 2, 0, 54, - 54, 27, 1, 1, 5, 2, 0, 0, - 0, 1, 1, 0, 1, 0, 1, 0, - 0, 0, 26, 0, 0, 0, 1, 1, - 1, 0, 0, 2, 1, 0, 1, 1, - 0, 2, 0, 0, 2, 0, 2, 1, - 0, 1, 0, 3, 0, 0, 1, 21, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 1, 0, 5, 2, 0, - 0, 1, 0, 2, 0, 0, 15, 0, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 0, 2, 1, 1, 0, 3, 1, - 0, 6, 5, 1, 1, 0, 1, 0, - 23, 0, 0, 0, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 4, - 0, 0, 0, 0, 1, 0, 6, 0, - 0, 0, 0, 0, 1, 3, 0, 0, - 0, 1, 4, 0, 0, 0, 6, 1, - 7, 3, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 29, - 0, 0, 0, 3, 2, 3, 2, 2, - 2, 3, 2, 2, 3, 3, 1, 2, - 4, 2, 2, 4, 4, 2, 0, 2, - 0, 3, 1, 0, 1, 21, 1, 0, - 4, 0, 0, 0, 1, 2, 0, 1, - 1, 1, 4, 0, 3, 1, 3, 2, - 0, 3, 0, 5, 2, 0, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 4, 0, 0, 0, 3, 1, 0, 4, - 1, 4, 4, 3, 1, 0, 7, 5, - 1, 1, 0, 1, 0, 23, 1, 0, - 1, 1, 1, 1, 0, 2, 1, 3, - 2, 0, 1, 3, 1, 2, 0, 1, - 0, 2, 1, 2, 3, 4, 0, 0, - 0, 1, 0, 6, 2, 0, 0, 0, - 0, 1, 3, 0, 0, 0, 1, 0, - 1, 4, 0, 0, 0, 1, 1, 0, - 1, 0, 0, 0, 1, 1, 0, 1, - 0, 1, 0, 0, 0, 29, 0, 0, - 0, 3, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 26, 0, - 0, 0, 1, 1, 1, 0, 0, 2, - 1, 0, 1, 1, 0, 2, 0, 0, - 2, 0, 2, 1, 0, 1, 0, 3, - 0, 0, 1, 21, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 1, - 0, 5, 2, 6, 0, 1, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 2, 1, 1, 0, 3, 1, 0, 7, - 5, 1, 1, 0, 1, 0, 23, 0, - 0, 0, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 0, 0, - 0, 0, 1, 0, 6, 0, 0, 0, - 0, 0, 1, 3, 0, 0, 0, 3, - 0, 1, 1, 1, 4, 0, 0, 0, - 6, 2, 3, 2, 2, 2, 3, 2, - 2, 3, 3, 1, 2, 4, 2, 2, - 4, 4, 2, 0, 2, 0, 3, 1, - 0, 1, 21, 1, 0, 4, 0, 0, - 0, 1, 2, 0, 1, 1, 1, 4, - 0, 3, 1, 3, 2, 0, 3, 0, - 5, 2, 0, 0, 1, 0, 2, 0, - 0, 15, 0, 0, 0, 4, 0, 0, - 0, 3, 1, 0, 4, 1, 4, 4, - 3, 1, 0, 7, 5, 1, 1, 0, - 1, 0, 23, 1, 0, 1, 1, 1, - 1, 0, 2, 1, 3, 2, 0, 1, - 3, 1, 2, 0, 1, 0, 2, 1, - 2, 3, 4, 0, 0, 0, 1, 0, - 6, 2, 0, 0, 0, 0, 1, 3, - 0, 0, 0, 1, 0, 1, 4, 0, - 0, 0, 1, 0, 0, 14, 0, 3, - 
2, 2, 3, 2, 2, 2, 54, 54, - 29, 1, 0, 0, 0, 0, 2, 1, - 1, 4, 2, 1, 0, 1, 0, 1, - 0, 11, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 26, - 0, 0, 0, 1, 1, 1, 0, 0, - 2, 1, 0, 1, 1, 0, 2, 0, - 0, 2, 0, 2, 1, 0, 1, 0, - 3, 0, 0, 1, 21, 0, 0, 3, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 1, 0, 5, 2, 6, 0, 1, 0, - 1, 0, 2, 0, 0, 15, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 0, 2, 1, 1, 0, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 0, 0, 0, 0, 1, 0, 0, 1, - 0, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 0, - 0, 0, 0, 1, 0, 6, 0, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 3, 0, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 29, 0, - 0, 0, 3, 2, 3, 2, 2, 2, - 3, 2, 2, 3, 3, 1, 2, 4, - 2, 2, 4, 4, 2, 0, 2, 0, - 3, 1, 0, 1, 21, 1, 0, 4, - 0, 0, 0, 1, 2, 0, 1, 1, - 1, 4, 0, 3, 1, 3, 2, 0, - 3, 0, 5, 2, 0, 0, 1, 0, - 2, 0, 0, 15, 0, 0, 0, 4, - 0, 0, 0, 3, 1, 0, 4, 1, - 4, 4, 3, 1, 0, 7, 5, 1, - 1, 0, 1, 0, 23, 1, 0, 1, - 1, 1, 1, 0, 2, 1, 3, 2, - 0, 1, 3, 1, 2, 0, 1, 0, - 2, 1, 2, 3, 4, 0, 0, 0, - 1, 0, 6, 2, 0, 0, 0, 0, - 1, 3, 0, 0, 0, 1, 0, 1, - 4, 0, 0, 0, 1, 1, 1, 4, - 0, 0, 0, 6, 7, 1, 0, 1, - 0, 2, 3, 2, 1, 0, 1, 1, - 3, 0, 1, 5, 0, 0, 17, 20, - 20, 20, 14, 20, 20, 20, 23, 21, - 21, 21, 20, 23, 20, 20, 20, 21, - 21, 21, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, -} - -var _graphclust_range_lengths []byte = []byte{ - 0, 0, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 2, 4, - 1, 2, 1, 2, 2, 5, 6, 2, - 2, 5, 1, 3, 2, 3, 5, 2, - 3, 1, 3, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 2, 3, 1, 1, - 2, 2, 1, 2, 1, 1, 2, 1, - 2, 1, 2, 2, 2, 1, 1, 4, - 2, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 1, 2, 2, 1, 1, 2, 2, 1, - 1, 3, 2, 2, 0, 0, 2, 0, - 0, 0, 0, 1, 4, 1, 0, 2, - 1, 2, 2, 0, 2, 2, 1, 1, - 2, 6, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 2, 0, 1, 1, - 1, 1, 0, 1, 0, 3, 3, 1, - 2, 2, 2, 0, 5, 1, 1, 0, - 1, 1, 1, 1, 1, 2, 1, 1, - 4, 1, 1, 1, 1, 1, 4, 1, - 2, 2, 5, 2, 6, 2, 8, 4, - 2, 5, 0, 3, 2, 4, 1, 6, - 2, 4, 4, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 4, 4, 1, 1, - 2, 2, 2, 2, 1, 1, 6, 2, - 5, 1, 3, 3, 4, 4, 4, 4, - 2, 0, 0, 1, 1, 0, 1, 0, - 1, 1, 0, 2, 1, 1, 2, 4, - 1, 2, 4, 1, 5, 0, 3, 2, - 1, 0, 0, 2, 0, 0, 0, 0, - 1, 4, 1, 0, 2, 1, 4, 2, - 0, 4, 3, 4, 2, 2, 6, 2, - 2, 4, 1, 4, 2, 4, 1, 3, - 3, 2, 2, 0, 1, 1, 1, 0, - 1, 0, 3, 3, 1, 2, 2, 2, - 0, 5, 1, 1, 0, 1, 0, 1, - 1, 1, 0, 0, 0, 0, 1, 1, - 1, 0, 0, 1, 2, 2, 1, 1, - 1, 1, 2, 1, 1, 4, 1, 1, - 1, 1, 2, 4, 1, 2, 1, 2, - 2, 5, 6, 2, 2, 5, 1, 3, - 2, 3, 5, 2, 3, 1, 3, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 2, 3, 1, 1, 2, 2, 1, 2, - 1, 1, 2, 1, 2, 1, 2, 2, - 2, 1, 1, 4, 2, 0, 0, 0, - 1, 0, 1, 0, 1, 0, 1, 1, - 0, 2, 1, 1, 1, 2, 2, 1, - 1, 2, 2, 1, 1, 3, 2, 2, - 0, 0, 2, 0, 0, 0, 0, 1, - 4, 1, 0, 2, 1, 2, 2, 0, - 2, 2, 1, 1, 2, 6, 1, 1, - 1, 1, 2, 2, 1, 1, 1, 2, - 2, 0, 1, 1, 1, 1, 0, 1, - 0, 3, 3, 1, 2, 2, 2, 0, - 5, 1, 1, 0, 1, 1, 1, 1, - 1, 2, 1, 1, 4, 1, 1, 1, - 1, 1, 4, 1, 2, 2, 5, 2, - 6, 2, 8, 4, 2, 5, 0, 3, - 2, 4, 1, 6, 2, 4, 4, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 4, 4, 1, 1, 2, 2, 2, 2, - 1, 1, 6, 2, 5, 1, 3, 3, - 4, 4, 4, 4, 2, 0, 0, 1, - 1, 0, 1, 0, 1, 1, 0, 2, - 1, 1, 2, 4, 1, 2, 4, 1, - 5, 0, 3, 2, 1, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 4, 2, 0, 4, 3, 4, - 2, 2, 6, 2, 2, 4, 1, 4, - 2, 4, 1, 3, 3, 2, 2, 0, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 0, 1, 1, 1, 0, 0, - 0, 0, 1, 1, 1, 0, 0, 1, - 2, 3, 1, 1, 1, 1, 1, 1, - 1, 0, 1, 0, 1, 1, 0, 1, - 1, 0, 1, 0, 1, 3, 1, 2, - 2, 1, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 1, 1, 2, 2, - 2, 1, 3, 2, 1, 1, 3, 1, - 3, 3, 1, 0, 0, 0, 0, 0, - 1, 1, 1, 2, 2, 4, 1, 1, - 2, 1, 1, 1, 3, 1, 2, 1, - 2, 1, 2, 0, 0, 1, 1, 5, - 9, 2, 1, 3, 5, 3, 1, 6, - 1, 1, 1, 1, 
1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 1, 0, 1, - 1, 0, 1, 1, 0, 1, 0, 1, - 3, 1, 2, 2, 1, 0, 0, 1, - 0, 0, 0, 0, 0, 1, 0, 1, - 1, 2, 2, 1, 1, 5, 1, 1, - 1, 1, 2, 1, 1, 4, 1, 1, - 1, 1, 2, 4, 1, 2, 1, 2, - 2, 5, 6, 2, 2, 5, 1, 3, - 2, 3, 5, 2, 3, 1, 3, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 2, 3, 1, 1, 2, 2, 1, 2, - 1, 1, 2, 1, 2, 1, 2, 2, - 2, 1, 1, 4, 2, 0, 0, 1, - 1, 0, 1, 0, 1, 1, 0, 2, - 1, 1, 1, 2, 2, 1, 1, 2, - 2, 1, 1, 3, 2, 2, 0, 0, - 2, 0, 0, 0, 0, 1, 4, 1, - 0, 2, 1, 2, 2, 0, 2, 2, - 1, 1, 2, 6, 1, 1, 1, 1, - 2, 2, 1, 1, 1, 2, 2, 0, - 1, 1, 1, 1, 0, 1, 0, 3, - 3, 1, 2, 2, 2, 0, 5, 1, - 1, 0, 1, 1, 1, 0, 0, 0, - 0, 0, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 1, - 4, 1, 2, 2, 5, 2, 6, 2, - 8, 4, 2, 5, 0, 3, 2, 4, - 1, 6, 2, 4, 4, 1, 1, 2, - 1, 2, 1, 4, 0, 0, 4, 4, - 1, 1, 2, 2, 2, 2, 1, 1, - 6, 2, 5, 1, 3, 3, 4, 4, - 4, 4, 2, 0, 0, 1, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 2, 4, 1, 2, 4, 1, 5, 0, - 3, 2, 1, 0, 0, 2, 0, 0, - 0, 0, 1, 4, 1, 0, 2, 1, - 4, 2, 0, 4, 3, 4, 2, 2, - 6, 2, 2, 4, 1, 4, 2, 4, - 1, 3, 3, 2, 2, 0, 1, 1, - 1, 0, 1, 0, 3, 3, 1, 2, - 2, 2, 0, 5, 1, 1, 0, 1, - 0, 1, 1, 1, 0, 0, 0, 3, - 1, 1, 1, 1, 1, 2, 1, 1, - 4, 1, 1, 1, 1, 1, 4, 1, - 2, 2, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 2, 4, - 1, 2, 1, 2, 2, 5, 6, 2, - 2, 5, 1, 3, 2, 3, 5, 2, - 3, 1, 3, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 2, 3, 1, 1, - 2, 2, 1, 2, 1, 1, 2, 1, - 2, 1, 2, 2, 2, 1, 1, 4, - 2, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 1, 2, 2, 1, 1, 2, 2, 1, - 1, 3, 2, 2, 0, 0, 2, 0, - 0, 0, 0, 1, 4, 1, 0, 2, - 1, 2, 2, 0, 2, 2, 1, 1, - 2, 6, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 2, 0, 1, 1, - 1, 1, 0, 1, 0, 3, 3, 1, - 2, 2, 2, 0, 5, 1, 1, 0, - 1, 0, 0, 0, 1, 1, 1, 0, - 0, 5, 2, 6, 2, 8, 4, 2, - 5, 0, 3, 2, 4, 1, 6, 2, - 4, 4, 1, 1, 2, 1, 2, 1, - 4, 0, 0, 4, 4, 1, 1, 2, - 2, 2, 2, 1, 1, 6, 2, 5, - 1, 3, 3, 4, 4, 4, 4, 2, - 0, 0, 1, 1, 0, 1, 0, 1, - 1, 0, 2, 1, 1, 2, 4, 1, - 2, 4, 1, 5, 0, 3, 2, 1, - 0, 0, 2, 0, 0, 0, 0, 1, - 4, 1, 0, 2, 1, 4, 2, 0, - 4, 3, 4, 2, 2, 6, 2, 2, - 4, 1, 4, 2, 4, 1, 3, 3, - 2, 2, 0, 1, 1, 1, 0, 1, - 0, 3, 3, 1, 2, 2, 2, 0, - 5, 1, 1, 0, 1, 0, 1, 1, - 1, 0, 1, 3, 1, 3, 3, 1, - 0, 0, 0, 0, 0, 1, 1, 1, - 3, 2, 4, 1, 0, 1, 1, 1, - 3, 1, 1, 1, 3, 1, 3, 1, - 3, 1, 2, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 1, 
1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 0, 1, 1, - 5, 9, 2, 1, 3, 5, 3, 1, - 6, 1, 1, 2, 2, 2, 6, 0, - 0, 0, 4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_index_offsets []int16 = []int16{ - 0, 0, 2, 4, 6, 8, 11, 15, - 17, 20, 25, 28, 30, 32, 34, 63, - 68, 70, 73, 76, 80, 84, 90, 97, - 102, 106, 112, 115, 120, 123, 129, 135, - 138, 144, 146, 152, 155, 157, 161, 163, - 169, 171, 176, 178, 200, 203, 207, 212, - 214, 217, 220, 222, 225, 227, 230, 233, - 235, 241, 243, 246, 249, 252, 254, 256, - 262, 265, 271, 274, 281, 283, 285, 287, - 289, 291, 294, 296, 298, 314, 317, 319, - 321, 326, 329, 332, 334, 336, 339, 342, - 344, 348, 353, 357, 360, 364, 366, 369, - 377, 383, 385, 387, 389, 395, 397, 421, - 424, 426, 429, 432, 434, 437, 440, 443, - 445, 449, 457, 459, 461, 463, 465, 468, - 471, 473, 475, 477, 480, 483, 488, 490, - 492, 494, 496, 498, 500, 507, 511, 515, - 517, 520, 523, 527, 531, 537, 539, 541, - 545, 547, 549, 551, 553, 556, 560, 562, - 565, 570, 573, 575, 577, 579, 610, 615, - 617, 620, 626, 634, 640, 649, 654, 665, - 673, 678, 686, 690, 697, 701, 708, 714, - 723, 728, 737, 746, 750, 752, 757, 759, - 765, 768, 773, 775, 797, 803, 808, 814, - 816, 819, 822, 826, 831, 833, 836, 844, - 848, 858, 860, 867, 872, 880, 887, 892, - 900, 903, 909, 912, 914, 916, 918, 920, - 923, 925, 927, 943, 946, 948, 950, 957, - 962, 964, 967, 975, 978, 984, 989, 994, - 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, - 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, - 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, - 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, - 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, - 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, - 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, - 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, - 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, - 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, - 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, - 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, - 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, - 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, - 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, - 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, - 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, - 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, - 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, - 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, - 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, - 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, - 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, - 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, - 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, - 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, - 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, - 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, - 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, - 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, - 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, - 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062, 
- 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, - 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, - 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, - 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, - 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, - 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, - 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, - 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, - 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, - 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, - 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, - 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, - 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, - 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, - 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, - 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, - 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, - 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, - 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, - 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, - 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, - 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, - 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, - 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, - 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, - 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, - 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, - 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, - 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, - 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, - 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486, - 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, - 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, - 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, - 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, - 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, - 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, - 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, - 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, - 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, - 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, - 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, - 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, - 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, - 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, - 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, - 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, - 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, - 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, - 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, - 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, - 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, - 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, - 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, - 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, - 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, - 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, - 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, - 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, - 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, - 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, - 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, - 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, - 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, - 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, - 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, - 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, - 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, - 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, - 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, - 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069, - 
5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, - 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, - 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, - 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, - 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, - 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, - 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, - 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, - 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, - 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, - 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, - 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, - 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, - 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, - 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, - 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, - 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, - 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, - 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, - 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, - 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, - 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, - 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, - 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, - 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, - 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, - 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, - 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, - 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, - 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, - 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161, - 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, - 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, - 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, - 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, - 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, - 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, - 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, - 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, - 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, - 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, - 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, - 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, - 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, - 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, - 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, - 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, - 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, - 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, - 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, - 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, - 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, - 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, - 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, - 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, - 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, - 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, - 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, - 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, - 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, - 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, - 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, - 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, - 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, - 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, - 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, - 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, - 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, - 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, - 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, - 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512, - 
7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, - 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, - 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, - 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, - 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, - 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, - 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, - 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, - 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, - 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, - 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, - 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, - 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, - 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, - 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, - 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, - 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, - 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, - 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, - 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, - 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, - 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, - 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, - 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, - 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, - 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, - 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, - 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, - 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, - 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, - 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707, - 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, - 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, - 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, - 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, - 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, - 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, - 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, - 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, - 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, - 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, - 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, - 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, - 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, - 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, - 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, - 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, - 9718, 9739, 9760, 9781, -} - -var _graphclust_indicies []int16 = []int16{ - 0, 1, 3, 2, 2, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 2, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 2, 2, 3, 3, 2, - 3, 2, 4, 5, 6, 7, 8, 10, - 11, 12, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 9, 13, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 2, 2, 3, 2, 2, 2, 3, - 3, 3, 3, 2, 2, 2, 2, 2, - 2, 3, 2, 2, 2, 2, 2, 2, - 3, 2, 2, 2, 2, 3, 3, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 3, 2, - 3, 3, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 2, 3, - 3, 2, 2, 2, 2, 2, 2, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 2, - 3, 2, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 3, 3, 2, 3, 2, 2, 2, - 3, 3, 2, 3, 3, 2, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 2, 2, 2, - 3, 3, 3, 2, 3, 2, 3, 2, - 3, 3, 3, 3, 3, 2, 3, 3, - 2, 53, 54, 55, 56, 57, 2, 3, - 58, 2, 53, 54, 59, 55, 56, 57, - 2, 3, 2, 3, 2, 3, 2, 3, - 2, 3, 2, 60, 61, 2, 3, 2, - 3, 2, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, - 76, 2, 3, 3, 2, 3, 2, 3, - 2, 3, 3, 3, 3, 
2, 3, 3, - 2, 2, 2, 3, 3, 2, 3, 2, - 3, 3, 2, 2, 2, 3, 3, 2, - 3, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 3, 2, 3, 3, 2, - 77, 78, 63, 2, 3, 2, 3, 3, - 2, 79, 80, 81, 82, 83, 84, 85, - 2, 86, 87, 88, 89, 90, 2, 3, - 2, 3, 2, 3, 2, 3, 3, 3, - 3, 3, 2, 3, 2, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 104, 108, - 109, 110, 111, 112, 2, 3, 3, 2, - 2, 3, 2, 2, 3, 3, 3, 2, - 3, 2, 3, 3, 2, 2, 2, 3, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 3, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 3, 2, 2, - 3, 3, 3, 2, 2, 2, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 2, - 3, 3, 2, 113, 114, 115, 116, 2, - 3, 2, 3, 2, 3, 2, 3, 2, - 117, 2, 3, 2, 118, 119, 120, 121, - 122, 123, 2, 3, 3, 3, 2, 2, - 2, 2, 3, 3, 2, 3, 3, 2, - 2, 2, 3, 3, 3, 3, 2, 124, - 125, 126, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 127, 128, 129, - 2, 130, 2, 2, 130, 2, 130, 130, - 2, 130, 130, 2, 130, 130, 130, 2, - 130, 2, 130, 130, 2, 130, 130, 130, - 130, 2, 130, 130, 2, 2, 130, 130, - 2, 130, 2, 131, 132, 133, 134, 135, - 136, 137, 139, 140, 141, 142, 143, 144, - 145, 146, 147, 148, 149, 150, 22, 151, - 152, 153, 154, 155, 156, 157, 158, 159, - 138, 2, 130, 130, 130, 130, 2, 130, - 2, 130, 130, 2, 3, 3, 2, 2, - 3, 130, 130, 2, 130, 130, 2, 130, - 2, 3, 130, 130, 130, 3, 3, 2, - 130, 130, 130, 2, 2, 2, 130, 2, - 3, 3, 130, 130, 3, 2, 130, 130, - 130, 2, 130, 2, 130, 2, 130, 2, - 3, 2, 2, 130, 130, 2, 130, 2, - 3, 130, 130, 3, 130, 2, 3, 130, - 130, 3, 3, 130, 130, 2, 130, 130, - 3, 2, 130, 130, 130, 3, 3, 3, - 2, 130, 3, 130, 2, 2, 2, 3, - 2, 2, 2, 130, 130, 130, 3, 130, - 3, 2, 130, 130, 3, 3, 3, 130, - 130, 130, 2, 130, 130, 3, 3, 2, - 2, 2, 130, 130, 130, 2, 130, 2, - 3, 130, 130, 130, 130, 3, 130, 3, - 3, 2, 130, 3, 130, 2, 130, 2, - 130, 3, 130, 130, 2, 130, 2, 130, - 130, 130, 130, 3, 2, 3, 130, 2, - 130, 130, 130, 130, 2, 130, 2, 160, - 161, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 2, 3, 130, 130, - 3, 130, 2, 3, 130, 130, 130, 2, - 130, 3, 130, 130, 130, 2, 130, 2, - 130, 130, 2, 130, 130, 2, 3, 130, - 3, 2, 130, 130, 130, 2, 3, 130, - 2, 130, 130, 2, 130, 130, 3, 130, - 3, 3, 130, 2, 130, 130, 3, 2, - 130, 130, 130, 130, 3, 130, 130, 3, - 130, 2, 130, 2, 3, 3, 3, 130, - 130, 3, 2, 130, 2, 130, 2, 3, - 3, 3, 3, 130, 130, 3, 130, 2, - 3, 130, 130, 3, 130, 3, 2, 3, - 130, 3, 130, 2, 3, 130, 130, 130, - 130, 3, 130, 2, 130, 130, 2, 181, - 182, 183, 184, 185, 2, 130, 58, 2, - 130, 2, 130, 2, 130, 2, 130, 2, - 186, 187, 2, 130, 2, 130, 2, 188, - 189, 190, 191, 66, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 2, 130, - 130, 2, 130, 2, 130, 2, 130, 130, - 130, 3, 3, 130, 2, 130, 2, 130, - 2, 3, 130, 2, 130, 3, 2, 3, - 130, 130, 130, 3, 130, 3, 2, 130, - 2, 3, 130, 3, 130, 3, 130, 2, - 130, 130, 3, 130, 2, 130, 130, 130, - 130, 2, 130, 3, 3, 130, 130, 3, - 2, 130, 130, 3, 130, 3, 2, 202, - 203, 189, 2, 130, 2, 130, 130, 2, - 204, 205, 206, 207, 208, 209, 210, 2, - 211, 212, 213, 214, 215, 2, 130, 2, - 130, 2, 130, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, - 236, 237, 238, 2, 130, 3, 130, 2, - 2, 130, 3, 2, 3, 3, 2, 130, - 3, 130, 130, 2, 130, 2, 3, 130, - 3, 130, 3, 2, 2, 130, 2, 3, - 130, 130, 3, 130, 3, 130, 2, 130, - 3, 130, 2, 130, 130, 3, 130, 3, - 2, 130, 130, 3, 3, 3, 3, 130, - 130, 2, 3, 130, 2, 3, 3, 130, - 2, 130, 3, 130, 3, 130, 3, 130, - 2, 3, 2, 130, 130, 3, 3, 130, - 3, 130, 2, 2, 2, 130, 130, 3, - 130, 3, 130, 2, 2, 130, 3, 3, - 130, 3, 130, 2, 3, 130, 3, 130, - 2, 
3, 3, 130, 130, 2, 3, 3, - 3, 130, 130, 2, 239, 240, 115, 241, - 2, 130, 2, 130, 2, 130, 2, 242, - 2, 130, 2, 243, 244, 245, 246, 247, - 248, 2, 3, 3, 130, 130, 130, 2, - 2, 2, 2, 130, 130, 2, 130, 130, - 2, 2, 2, 130, 130, 130, 130, 2, - 249, 250, 251, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 130, 2, 252, 2, - 3, 2, 253, 2, 254, 255, 256, 258, - 257, 2, 130, 2, 2, 130, 130, 3, - 2, 3, 2, 259, 2, 260, 261, 262, - 264, 263, 2, 3, 2, 2, 3, 3, - 79, 80, 81, 82, 83, 84, 2, 3, - 1, 265, 265, 3, 1, 265, 266, 3, - 1, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 267, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 267, 267, 268, 268, 267, 268, - 267, 269, 270, 271, 272, 273, 275, 276, - 277, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 293, - 294, 295, 296, 274, 278, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 267, 267, 268, 267, 267, 267, 268, 268, - 268, 268, 267, 267, 267, 267, 267, 267, - 268, 267, 267, 267, 267, 267, 267, 268, - 267, 267, 267, 267, 268, 268, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 268, 267, 268, - 268, 267, 267, 267, 267, 267, 267, 268, - 268, 268, 268, 268, 268, 267, 268, 268, - 267, 267, 267, 267, 267, 267, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 267, 268, - 267, 297, 298, 299, 300, 301, 302, 303, - 304, 305, 306, 307, 308, 309, 310, 311, - 312, 313, 314, 315, 316, 317, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 268, 268, 267, 268, 267, 267, 267, 268, - 268, 267, 268, 268, 267, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 267, 267, 267, 268, - 268, 268, 267, 268, 267, 268, 267, 268, - 268, 268, 268, 268, 267, 268, 268, 267, - 318, 319, 320, 321, 322, 267, 268, 323, - 267, 318, 319, 324, 320, 321, 322, 267, - 268, 267, 268, 267, 268, 267, 268, 267, - 268, 267, 325, 326, 267, 268, 267, 268, - 267, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 267, 268, 268, 267, 268, 267, 268, 267, - 268, 268, 268, 268, 267, 268, 268, 267, - 267, 267, 268, 268, 267, 268, 267, 268, - 268, 267, 267, 267, 268, 268, 267, 268, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 268, 267, 268, 268, 267, 342, - 343, 328, 267, 268, 267, 268, 268, 267, - 344, 345, 346, 347, 348, 349, 350, 267, - 351, 352, 353, 354, 355, 267, 268, 267, - 268, 267, 268, 267, 268, 268, 268, 268, - 268, 267, 268, 267, 356, 357, 358, 359, - 360, 361, 362, 363, 364, 365, 366, 367, - 368, 369, 370, 371, 372, 369, 373, 374, - 375, 376, 377, 267, 268, 268, 267, 267, - 268, 267, 267, 268, 268, 268, 267, 268, - 267, 268, 268, 267, 267, 267, 268, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 268, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 268, 267, 267, 268, - 268, 268, 267, 267, 267, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 378, 379, 380, 381, 267, 268, - 267, 268, 267, 268, 267, 268, 267, 382, - 267, 268, 267, 383, 384, 385, 386, 387, - 388, 267, 268, 268, 268, 267, 267, 267, - 267, 268, 268, 267, 268, 268, 267, 267, - 267, 268, 268, 268, 268, 267, 389, 390, - 391, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 392, 393, 394, 267, - 395, 267, 395, 267, 267, 395, 395, 267, - 395, 395, 267, 395, 395, 395, 267, 395, - 267, 395, 395, 267, 395, 395, 395, 395, - 267, 395, 395, 267, 267, 395, 395, 267, - 395, 267, 396, 397, 
398, 399, 400, 401, - 402, 404, 405, 406, 407, 408, 409, 410, - 411, 412, 413, 414, 415, 287, 416, 417, - 418, 419, 420, 421, 422, 423, 424, 403, - 267, 395, 395, 395, 395, 267, 395, 267, - 395, 395, 267, 268, 268, 267, 267, 268, - 395, 395, 267, 395, 395, 267, 395, 267, - 268, 395, 395, 395, 268, 268, 267, 395, - 395, 395, 267, 267, 267, 395, 267, 268, - 268, 395, 395, 268, 267, 395, 395, 395, - 267, 395, 267, 395, 267, 395, 267, 268, - 267, 267, 395, 395, 267, 395, 267, 268, - 395, 395, 268, 395, 267, 268, 395, 395, - 268, 268, 395, 395, 267, 395, 395, 268, - 267, 395, 395, 395, 268, 268, 268, 267, - 395, 268, 395, 267, 267, 267, 268, 267, - 267, 267, 395, 395, 395, 268, 395, 268, - 267, 395, 395, 268, 268, 268, 395, 395, - 395, 267, 395, 395, 268, 268, 267, 267, - 267, 395, 395, 395, 267, 395, 267, 268, - 395, 395, 395, 395, 268, 395, 268, 268, - 267, 395, 268, 395, 267, 395, 267, 395, - 268, 395, 395, 267, 395, 267, 395, 395, - 395, 395, 268, 267, 268, 395, 267, 395, - 395, 395, 395, 267, 395, 267, 425, 426, - 427, 428, 429, 430, 431, 432, 433, 434, - 435, 436, 437, 438, 439, 440, 441, 442, - 443, 444, 445, 267, 268, 395, 395, 268, - 395, 267, 268, 395, 395, 395, 267, 395, - 268, 395, 395, 395, 267, 395, 267, 395, - 395, 267, 395, 395, 267, 268, 395, 268, - 267, 395, 395, 395, 267, 268, 395, 267, - 395, 395, 267, 395, 395, 268, 395, 268, - 268, 395, 267, 395, 395, 268, 267, 395, - 395, 395, 395, 268, 395, 395, 268, 395, - 267, 395, 267, 268, 268, 268, 395, 395, - 268, 267, 395, 267, 395, 267, 268, 268, - 268, 268, 395, 395, 268, 395, 267, 268, - 395, 395, 268, 395, 268, 267, 268, 395, - 268, 395, 267, 268, 395, 395, 395, 395, - 268, 395, 267, 395, 395, 267, 446, 447, - 448, 449, 450, 267, 395, 323, 267, 395, - 267, 395, 267, 395, 267, 395, 267, 451, - 452, 267, 395, 267, 395, 267, 453, 454, - 455, 456, 331, 457, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 267, 395, 395, - 267, 395, 267, 395, 267, 395, 395, 395, - 268, 268, 395, 267, 395, 267, 395, 267, - 268, 395, 267, 395, 268, 267, 268, 395, - 395, 395, 268, 395, 268, 267, 395, 267, - 268, 395, 268, 395, 268, 395, 267, 395, - 395, 268, 395, 267, 395, 395, 395, 395, - 267, 395, 268, 268, 395, 395, 268, 267, - 395, 395, 268, 395, 268, 267, 467, 468, - 454, 267, 395, 267, 395, 395, 267, 469, - 470, 471, 472, 473, 474, 475, 267, 476, - 477, 478, 479, 480, 267, 395, 267, 395, - 267, 395, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 481, 482, 483, 484, 485, - 486, 487, 488, 489, 490, 491, 492, 493, - 494, 495, 496, 497, 498, 499, 500, 501, - 502, 503, 267, 395, 268, 395, 267, 267, - 395, 268, 267, 268, 268, 267, 395, 268, - 395, 395, 267, 395, 267, 268, 395, 268, - 395, 268, 267, 267, 395, 267, 268, 395, - 395, 268, 395, 268, 395, 267, 395, 268, - 395, 267, 395, 395, 268, 395, 268, 267, - 395, 395, 268, 268, 268, 268, 395, 395, - 267, 268, 395, 267, 268, 268, 395, 267, - 395, 268, 395, 268, 395, 268, 395, 267, - 268, 267, 395, 395, 268, 268, 395, 268, - 395, 267, 267, 267, 395, 395, 268, 395, - 268, 395, 267, 267, 395, 268, 268, 395, - 268, 395, 267, 268, 395, 268, 395, 267, - 268, 268, 395, 395, 267, 268, 268, 268, - 395, 395, 267, 504, 505, 380, 506, 267, - 395, 267, 395, 267, 395, 267, 507, 267, - 395, 267, 508, 509, 510, 511, 512, 513, - 267, 268, 268, 395, 395, 395, 267, 267, - 267, 267, 395, 395, 267, 395, 395, 267, - 267, 267, 395, 395, 395, 395, 267, 514, - 515, 516, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 395, 267, 517, 267, 268, - 267, 518, 267, 519, 520, 521, 523, 522, - 267, 395, 267, 267, 395, 395, 268, 267, - 268, 
267, 524, 267, 525, 526, 527, 529, - 528, 267, 268, 267, 267, 268, 268, 344, - 345, 346, 347, 348, 349, 267, 268, 267, - 268, 268, 267, 266, 268, 268, 267, 266, - 268, 267, 266, 268, 267, 531, 532, 530, - 267, 266, 268, 267, 266, 268, 267, 533, - 534, 535, 536, 537, 530, 267, 538, 267, - 297, 298, 299, 533, 534, 539, 300, 301, - 302, 303, 304, 305, 306, 307, 308, 309, - 310, 311, 312, 313, 314, 315, 316, 317, - 267, 540, 538, 297, 298, 299, 541, 535, - 536, 300, 301, 302, 303, 304, 305, 306, - 307, 308, 309, 310, 311, 312, 313, 314, - 315, 316, 317, 267, 540, 267, 542, 540, - 297, 298, 299, 543, 536, 300, 301, 302, - 303, 304, 305, 306, 307, 308, 309, 310, - 311, 312, 313, 314, 315, 316, 317, 267, - 542, 267, 267, 542, 544, 267, 542, 267, - 545, 546, 267, 540, 267, 267, 542, 267, - 540, 267, 540, 327, 328, 329, 330, 331, - 332, 333, 547, 335, 336, 337, 338, 339, - 340, 341, 549, 550, 551, 552, 553, 554, - 549, 550, 551, 552, 553, 554, 549, 548, - 555, 267, 268, 538, 267, 556, 556, 556, - 542, 267, 297, 298, 299, 541, 539, 300, - 301, 302, 303, 304, 305, 306, 307, 308, - 309, 310, 311, 312, 313, 314, 315, 316, - 317, 267, 545, 557, 267, 267, 540, 556, - 556, 542, 556, 556, 542, 556, 556, 556, - 542, 556, 556, 542, 556, 556, 542, 556, - 556, 267, 542, 542, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 550, 555, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 551, 555, 553, 554, 548, 549, - 550, 551, 553, 554, 548, 549, 550, 551, - 553, 554, 548, 549, 550, 551, 553, 554, - 548, 549, 550, 551, 553, 558, 557, 552, - 267, 555, 556, 267, 540, 542, 268, 268, - 267, 559, 560, 561, 562, 563, 530, 267, - 268, 323, 268, 268, 268, 267, 268, 268, - 267, 395, 268, 267, 395, 268, 267, 268, - 395, 268, 267, 530, 267, 564, 566, 567, - 568, 569, 570, 571, 566, 567, 568, 569, - 570, 571, 566, 530, 565, 555, 267, 268, - 538, 268, 267, 540, 540, 540, 542, 267, - 540, 540, 542, 540, 540, 542, 540, 540, - 540, 542, 540, 540, 542, 540, 540, 542, - 540, 540, 267, 542, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 567, 555, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 568, 555, 570, 571, 565, 566, - 567, 568, 570, 571, 565, 566, 567, 568, - 570, 571, 565, 566, 567, 568, 570, 571, - 565, 566, 567, 568, 570, 572, 573, 569, - 267, 555, 540, 268, 540, 542, 268, 542, - 268, 267, 540, 574, 575, 530, 267, 268, - 267, 268, 268, 268, 267, 577, 578, 579, - 580, 576, 267, 581, 582, 530, 267, 266, - 268, 267, 268, 266, 268, 267, 583, 530, - 267, 268, 268, 267, 584, 530, 267, 268, - 268, 267, 585, 586, 587, 588, 589, 590, - 591, 592, 593, 594, 595, 530, 267, 268, - 596, 267, 344, 345, 346, 347, 
348, 349, - 597, 267, 598, 267, 268, 267, 395, 268, - 267, 268, 395, 268, 395, 268, 267, 395, - 395, 268, 395, 268, 395, 268, 395, 268, - 395, 268, 267, 268, 268, 395, 395, 268, - 267, 395, 395, 268, 267, 395, 268, 395, - 268, 267, 268, 395, 268, 395, 268, 267, - 395, 268, 395, 268, 267, 395, 268, 267, - 395, 395, 268, 268, 395, 268, 395, 268, - 395, 267, 576, 267, 599, 576, 267, 322, - 530, 600, 530, 267, 268, 267, 266, 3, - 1, 266, 3, 1, 602, 603, 601, 1, - 266, 3, 1, 266, 3, 1, 604, 605, - 606, 607, 608, 601, 1, 609, 610, 612, - 611, 611, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 611, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 611, 611, 612, 612, 611, 612, 611, 613, - 614, 615, 616, 617, 619, 620, 621, 623, - 624, 625, 626, 627, 628, 629, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, - 640, 618, 622, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 611, 611, - 612, 611, 611, 611, 612, 612, 612, 612, - 611, 611, 611, 611, 611, 611, 612, 611, - 611, 611, 611, 611, 611, 612, 611, 611, - 611, 611, 612, 612, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 612, 611, 612, 612, 611, - 611, 611, 611, 611, 611, 612, 612, 612, - 612, 612, 612, 611, 612, 612, 611, 611, - 611, 611, 611, 611, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 611, 612, 611, 641, - 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 612, 612, - 611, 612, 611, 611, 611, 612, 612, 611, - 612, 612, 611, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 611, 611, 611, 612, 612, 612, - 611, 612, 611, 612, 611, 612, 612, 612, - 612, 612, 611, 612, 612, 611, 662, 663, - 664, 665, 666, 611, 612, 667, 611, 662, - 663, 668, 664, 665, 666, 611, 612, 611, - 612, 611, 612, 611, 612, 611, 612, 611, - 669, 670, 611, 612, 611, 612, 611, 671, - 672, 673, 674, 675, 676, 677, 678, 679, - 680, 681, 682, 683, 684, 685, 611, 612, - 612, 611, 612, 611, 612, 611, 612, 612, - 612, 612, 611, 612, 612, 611, 611, 611, - 612, 612, 611, 612, 611, 612, 612, 611, - 611, 611, 612, 612, 611, 612, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 612, 611, 612, 612, 611, 686, 687, 672, - 611, 612, 611, 612, 612, 611, 688, 689, - 690, 691, 692, 693, 694, 611, 695, 696, - 697, 698, 699, 611, 612, 611, 612, 611, - 612, 611, 612, 612, 612, 612, 612, 611, - 612, 611, 700, 701, 702, 703, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 713, - 714, 715, 716, 713, 717, 718, 719, 720, - 721, 611, 612, 612, 611, 611, 612, 611, - 611, 612, 612, 612, 611, 612, 611, 612, - 612, 611, 611, 611, 612, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 612, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 612, 611, 611, 612, 612, 612, - 611, 611, 611, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 611, 612, 612, 611, - 722, 723, 724, 725, 611, 612, 611, 612, - 611, 612, 611, 612, 611, 726, 611, 612, - 611, 727, 728, 729, 730, 731, 732, 611, - 612, 612, 612, 611, 611, 611, 611, 612, - 612, 611, 612, 612, 611, 611, 611, 612, - 612, 612, 612, 611, 733, 734, 735, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 736, 737, 738, 611, 739, 611, - 739, 611, 611, 739, 739, 611, 739, 739, - 611, 739, 739, 739, 611, 739, 611, 739, - 739, 611, 739, 739, 739, 739, 611, 739, - 739, 611, 611, 739, 739, 611, 739, 
611, - 740, 741, 742, 743, 744, 745, 746, 748, - 749, 750, 751, 752, 753, 754, 755, 756, - 757, 758, 759, 631, 760, 761, 762, 763, - 764, 765, 766, 767, 768, 747, 611, 739, - 739, 739, 739, 611, 739, 611, 739, 739, - 611, 612, 612, 611, 611, 612, 739, 739, - 611, 739, 739, 611, 739, 611, 612, 739, - 739, 739, 612, 612, 611, 739, 739, 739, - 611, 611, 611, 739, 611, 612, 612, 739, - 739, 612, 611, 739, 739, 739, 611, 739, - 611, 739, 611, 739, 611, 612, 611, 611, - 739, 739, 611, 739, 611, 612, 739, 739, - 612, 739, 611, 612, 739, 739, 612, 612, - 739, 739, 611, 739, 739, 612, 611, 739, - 739, 739, 612, 612, 612, 611, 739, 612, - 739, 611, 611, 611, 612, 611, 611, 611, - 739, 739, 739, 612, 739, 612, 611, 739, - 739, 612, 612, 612, 739, 739, 739, 611, - 739, 739, 612, 612, 611, 611, 611, 739, - 739, 739, 611, 739, 611, 612, 739, 739, - 739, 739, 612, 739, 612, 612, 611, 739, - 612, 739, 611, 739, 611, 739, 612, 739, - 739, 611, 739, 611, 739, 739, 739, 739, - 612, 611, 612, 739, 611, 739, 739, 739, - 739, 611, 739, 611, 769, 770, 771, 772, - 773, 774, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 784, 785, 786, 787, 788, - 789, 611, 612, 739, 739, 612, 739, 611, - 612, 739, 739, 739, 611, 739, 612, 739, - 739, 739, 611, 739, 611, 739, 739, 611, - 739, 739, 611, 612, 739, 612, 611, 739, - 739, 739, 611, 612, 739, 611, 739, 739, - 611, 739, 739, 612, 739, 612, 612, 739, - 611, 739, 739, 612, 611, 739, 739, 739, - 739, 612, 739, 739, 612, 739, 611, 739, - 611, 612, 612, 612, 739, 739, 612, 611, - 739, 611, 739, 611, 612, 612, 612, 612, - 739, 739, 612, 739, 611, 612, 739, 739, - 612, 739, 612, 611, 612, 739, 612, 739, - 611, 612, 739, 739, 739, 739, 612, 739, - 611, 739, 739, 611, 790, 791, 792, 793, - 794, 611, 739, 667, 611, 739, 611, 739, - 611, 739, 611, 739, 611, 795, 796, 611, - 739, 611, 739, 611, 797, 798, 799, 800, - 675, 801, 802, 803, 804, 805, 806, 807, - 808, 809, 810, 611, 739, 739, 611, 739, - 611, 739, 611, 739, 739, 739, 612, 612, - 739, 611, 739, 611, 739, 611, 612, 739, - 611, 739, 612, 611, 612, 739, 739, 739, - 612, 739, 612, 611, 739, 611, 612, 739, - 612, 739, 612, 739, 611, 739, 739, 612, - 739, 611, 739, 739, 739, 739, 611, 739, - 612, 612, 739, 739, 612, 611, 739, 739, - 612, 739, 612, 611, 811, 812, 798, 611, - 739, 611, 739, 739, 611, 813, 814, 815, - 816, 817, 818, 819, 611, 820, 821, 822, - 823, 824, 611, 739, 611, 739, 611, 739, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 825, 826, 827, 828, 829, 830, 831, - 832, 833, 834, 835, 836, 837, 838, 839, - 840, 841, 842, 843, 844, 845, 846, 847, - 611, 739, 612, 739, 611, 611, 739, 612, - 611, 612, 612, 611, 739, 612, 739, 739, - 611, 739, 611, 612, 739, 612, 739, 612, - 611, 611, 739, 611, 612, 739, 739, 612, - 739, 612, 739, 611, 739, 612, 739, 611, - 739, 739, 612, 739, 612, 611, 739, 739, - 612, 612, 612, 612, 739, 739, 611, 612, - 739, 611, 612, 612, 739, 611, 739, 612, - 739, 612, 739, 612, 739, 611, 612, 611, - 739, 739, 612, 612, 739, 612, 739, 611, - 611, 611, 739, 739, 612, 739, 612, 739, - 611, 611, 739, 612, 612, 739, 612, 739, - 611, 612, 739, 612, 739, 611, 612, 612, - 739, 739, 611, 612, 612, 612, 739, 739, - 611, 848, 849, 724, 850, 611, 739, 611, - 739, 611, 739, 611, 851, 611, 739, 611, - 852, 853, 854, 855, 856, 857, 611, 612, - 612, 739, 739, 739, 611, 611, 611, 611, - 739, 739, 611, 739, 739, 611, 611, 611, - 739, 739, 739, 739, 611, 858, 859, 860, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 739, 611, 861, 611, 612, 611, 862, - 611, 863, 864, 865, 867, 866, 611, 739, - 611, 611, 739, 739, 
612, 611, 612, 611, - 868, 611, 869, 870, 871, 873, 872, 611, - 612, 611, 611, 612, 612, 688, 689, 690, - 691, 692, 693, 611, 641, 642, 643, 604, - 605, 874, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 875, 610, 641, - 642, 643, 876, 606, 607, 644, 645, 646, - 647, 648, 649, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 659, 660, 661, 611, - 875, 611, 877, 875, 641, 642, 643, 878, - 607, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, - 659, 660, 661, 611, 877, 611, 609, 877, - 879, 611, 877, 611, 880, 881, 611, 875, - 611, 611, 877, 611, 875, 611, 875, 671, - 672, 673, 674, 675, 676, 677, 882, 679, - 680, 681, 682, 683, 684, 685, 884, 885, - 886, 887, 888, 889, 884, 885, 886, 887, - 888, 889, 884, 883, 890, 611, 612, 610, - 611, 891, 891, 891, 877, 611, 641, 642, - 643, 876, 874, 644, 645, 646, 647, 648, - 649, 650, 651, 652, 653, 654, 655, 656, - 657, 658, 659, 660, 661, 611, 880, 892, - 611, 611, 875, 891, 891, 877, 891, 891, - 877, 891, 891, 891, 877, 891, 891, 877, - 891, 891, 877, 891, 891, 611, 877, 877, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 885, 890, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 886, 890, - 888, 889, 883, 884, 885, 886, 888, 889, - 883, 884, 885, 886, 888, 889, 883, 884, - 885, 886, 888, 889, 883, 884, 885, 886, - 888, 893, 892, 887, 611, 890, 891, 611, - 875, 877, 265, 3, 1, 894, 895, 896, - 897, 898, 601, 1, 265, 899, 3, 265, - 3, 265, 3, 1, 901, 900, 900, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 900, 901, 901, 900, 901, 901, - 901, 901, 900, 901, 901, 900, 900, 901, - 901, 900, 901, 900, 902, 903, 904, 905, - 906, 908, 909, 910, 912, 913, 914, 915, - 916, 917, 918, 919, 920, 921, 922, 923, - 924, 925, 926, 927, 928, 929, 907, 911, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 900, 900, 901, 900, 900, - 900, 901, 901, 901, 901, 900, 900, 900, - 900, 900, 900, 901, 900, 900, 900, 900, - 900, 900, 901, 900, 900, 900, 900, 901, - 901, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 900, 900, 900, 900, - 900, 900, 901, 901, 901, 901, 901, 901, - 900, 901, 901, 900, 900, 900, 900, 900, - 900, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 900, 901, 900, 930, 931, 932, 933, - 934, 935, 936, 937, 938, 939, 940, 941, - 942, 943, 944, 945, 946, 947, 948, 949, - 950, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 901, 901, 900, 901, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 901, 900, - 901, 901, 900, 951, 952, 953, 954, 955, - 900, 901, 899, 900, 901, 900, 901, 900, - 901, 900, 901, 900, 956, 957, 900, 901, - 900, 901, 900, 958, 959, 960, 961, 962, - 963, 964, 965, 966, 967, 968, 969, 970, - 971, 972, 900, 
901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 900, 901, - 901, 900, 900, 900, 901, 901, 900, 901, - 900, 901, 901, 900, 900, 900, 901, 901, - 900, 901, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 901, 900, 901, 901, - 900, 973, 974, 959, 900, 901, 900, 901, - 901, 900, 975, 976, 977, 978, 979, 980, - 900, 981, 982, 983, 984, 985, 900, 901, - 900, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 986, 987, 988, - 989, 990, 991, 992, 993, 994, 995, 996, - 997, 998, 999, 1000, 1001, 1002, 999, 1003, - 1004, 1005, 1006, 1007, 900, 901, 901, 900, - 900, 901, 900, 900, 901, 901, 901, 900, - 901, 900, 901, 901, 900, 900, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 901, 900, 900, - 901, 901, 901, 900, 900, 900, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 900, - 901, 901, 900, 1008, 1009, 1010, 1011, 900, - 901, 900, 901, 900, 901, 900, 901, 900, - 1012, 900, 901, 900, 1013, 1014, 1015, 1016, - 1017, 1018, 900, 901, 901, 901, 900, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 901, 900, 1019, - 1020, 1021, 900, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 1022, 900, 1023, - 1024, 1025, 1027, 1026, 900, 901, 900, 900, - 901, 901, 951, 952, 1028, 953, 954, 955, - 900, 901, 900, 975, 976, 977, 978, 979, - 980, 1029, 900, 1030, 1031, 1032, 900, 1033, - 900, 1033, 900, 900, 1033, 1033, 900, 1033, - 1033, 900, 1033, 1033, 1033, 900, 1033, 900, - 1033, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 1033, 900, 900, 1033, 1033, 900, 1033, - 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, - 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, - 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, - 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, - 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, - 1033, 900, 901, 901, 900, 900, 901, 1033, - 1033, 900, 1033, 1033, 900, 1033, 900, 901, - 1033, 1033, 1033, 901, 901, 900, 1033, 1033, - 1033, 900, 900, 900, 1033, 900, 901, 901, - 1033, 1033, 901, 900, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 900, 901, 900, - 900, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 901, 1033, 900, 901, 1033, 1033, 901, - 901, 1033, 1033, 900, 1033, 1033, 901, 900, - 1033, 1033, 1033, 901, 901, 901, 900, 1033, - 901, 1033, 900, 900, 900, 901, 900, 900, - 900, 1033, 1033, 1033, 901, 1033, 901, 900, - 1033, 1033, 901, 901, 901, 1033, 1033, 1033, - 900, 1033, 1033, 901, 901, 900, 900, 900, - 1033, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 1033, 1033, 901, 1033, 901, 901, 900, - 1033, 901, 1033, 900, 1033, 900, 1033, 901, - 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, - 1033, 901, 900, 901, 1033, 900, 1033, 1033, - 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, - 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, - 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, - 1082, 1083, 900, 901, 1033, 1033, 901, 1033, - 900, 901, 1033, 1033, 1033, 900, 1033, 901, - 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, - 900, 1033, 1033, 900, 901, 1033, 901, 900, - 1033, 1033, 1033, 900, 901, 1033, 900, 1033, - 1033, 900, 1033, 1033, 901, 1033, 901, 901, - 1033, 900, 1033, 1033, 901, 900, 1033, 1033, - 1033, 1033, 901, 1033, 1033, 901, 1033, 900, - 1033, 900, 901, 901, 901, 1033, 1033, 901, - 900, 1033, 900, 1033, 900, 901, 901, 901, - 901, 1033, 1033, 901, 1033, 900, 901, 1033, - 1033, 901, 1033, 901, 900, 901, 1033, 901, - 1033, 900, 901, 1033, 1033, 1033, 1033, 901, - 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, - 1087, 1088, 900, 1033, 899, 900, 1033, 900, - 
1033, 900, 1033, 900, 1033, 900, 1089, 1090, - 900, 1033, 900, 1033, 900, 1091, 1092, 1093, - 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, - 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 1033, 1033, 901, - 901, 1033, 900, 1033, 900, 1033, 900, 901, - 1033, 900, 1033, 901, 900, 901, 1033, 1033, - 1033, 901, 1033, 901, 900, 1033, 900, 901, - 1033, 901, 1033, 901, 1033, 900, 1033, 1033, - 901, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 901, 901, 1033, 1033, 901, 900, 1033, - 1033, 901, 1033, 901, 900, 1105, 1106, 1092, - 900, 1033, 900, 1033, 1033, 900, 1107, 1108, - 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, - 1116, 1117, 1118, 900, 1033, 900, 1033, 900, - 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, - 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, - 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, - 1141, 900, 1033, 901, 1033, 900, 900, 1033, - 901, 900, 901, 901, 900, 1033, 901, 1033, - 1033, 900, 1033, 900, 901, 1033, 901, 1033, - 901, 900, 900, 1033, 900, 901, 1033, 1033, - 901, 1033, 901, 1033, 900, 1033, 901, 1033, - 900, 1033, 1033, 901, 1033, 901, 900, 1033, - 1033, 901, 901, 901, 901, 1033, 1033, 900, - 901, 1033, 900, 901, 901, 1033, 900, 1033, - 901, 1033, 901, 1033, 901, 1033, 900, 901, - 900, 1033, 1033, 901, 901, 1033, 901, 1033, - 900, 900, 900, 1033, 1033, 901, 1033, 901, - 1033, 900, 900, 1033, 901, 901, 1033, 901, - 1033, 900, 901, 1033, 901, 1033, 900, 901, - 901, 1033, 1033, 900, 901, 901, 901, 1033, - 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, - 900, 1033, 900, 1033, 900, 1145, 900, 1033, - 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, - 901, 901, 1033, 1033, 1033, 900, 900, 900, - 900, 1033, 1033, 900, 1033, 1033, 900, 900, - 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, - 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1155, 900, 901, 900, - 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, - 1033, 900, 900, 1033, 1033, 901, 900, 901, - 900, 3, 265, 3, 1, 1162, 3, 1, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, - 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, - 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, - 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, - 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, - 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, - 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, - 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, - 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, - 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, - 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 
1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, - 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, - 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, - 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, - 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, - 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, - 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, - 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, - 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, - 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, - 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, - 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, - 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, - 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, - 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, - 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, - 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, - 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, - 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, - 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, - 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, - 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, - 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, - 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, - 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, - 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, - 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, - 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, - 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, - 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, - 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, - 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, - 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, - 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, - 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, - 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, - 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, - 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, - 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, - 1337, 1338, 1339, 
1340, 1341, 1342, 1343, 1344, - 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, - 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, - 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, - 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, - 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, - 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, - 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, - 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, - 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, - 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, - 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, - 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, - 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, - 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, - 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, - 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, - 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, - 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, - 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, - 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, - 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163, - 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, - 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, - 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, - 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, - 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, - 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, - 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, - 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, - 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, - 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, - 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, - 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, - 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, - 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, - 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, - 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, - 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, - 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, - 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, - 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, - 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, - 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, - 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, - 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, - 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, - 3, 1162, 3, 1, 601, 1, 1425, 1427, - 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, - 1430, 1431, 1432, 1427, 601, 1426, 890, 1, - 3, 610, 3, 1, 875, 875, 875, 877, - 1, 875, 875, 877, 875, 875, 877, 875, - 875, 875, 877, 875, 875, 877, 875, 875, - 877, 875, 875, 1, 877, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1428, 890, 1430, 
1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, - 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, - 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, - 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, - 1435, 1437, 1430, 1436, 1, 890, 875, 3, - 875, 877, 3, 877, 3, 1, 875, 1, - 265, 265, 1, 265, 1438, 1439, 601, 1, - 265, 3, 1, 3, 3, 265, 3, 1, - 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, - 601, 1, 266, 3, 1, 3, 266, 3, - 1, 1447, 601, 1, 3, 265, 3, 1, - 1448, 601, 1, 3, 265, 3, 1, 1449, - 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, - 1458, 1459, 601, 1, 3, 1460, 1, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, - 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, - 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, - 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, - 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, - 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, - 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, - 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, - 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, - 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, - 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, - 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, - 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, - 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, - 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, - 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, - 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, - 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, - 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, - 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, - 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, - 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, - 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, - 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, - 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, - 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, - 1461, 
1462, 1462, 1462, 1461, 1462, 1461, 1462, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, - 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, - 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, - 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, - 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, - 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, - 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, - 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, - 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, - 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, - 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, - 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, - 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, - 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, - 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, - 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, - 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, - 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, - 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, - 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, - 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, - 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, - 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, - 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, - 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, - 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, - 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, - 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, - 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, - 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, - 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, - 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, - 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, - 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, - 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, - 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, - 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, - 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, - 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, - 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, - 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, - 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, - 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, - 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, - 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, - 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, - 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, - 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1462, 
1462, 1589, 1589, 1462, 1461, 1589, 1589, - 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, - 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, - 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, - 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, - 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, - 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, - 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, - 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, - 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, - 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, - 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, - 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, - 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, - 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, - 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, - 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, - 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, - 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, - 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, - 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, - 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, - 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, - 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, - 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, - 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, - 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, - 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, - 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, - 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, - 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, - 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, - 1162, 3, 1, 3, 1162, 3, 1162, 3, - 1, 1162, 1162, 3, 1162, 3, 1162, 3, - 1162, 3, 1162, 3, 1, 3, 3, 1162, - 1162, 3, 1, 1162, 1162, 3, 1, 1162, - 3, 1162, 3, 1, 3, 1162, 3, 1162, - 3, 1, 1162, 3, 1162, 3, 1, 1162, - 3, 1, 1162, 1162, 3, 3, 1162, 3, - 1162, 3, 1162, 1, 1440, 1, 1726, 1440, - 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, - 1, 265, 3, 1, 3, 265, 1, 1, - 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, - 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, - 1729, 1, 1732, 1740, 1747, 1, 1731, 262, - 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, - 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, - 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, - 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, - 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, - 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, - 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, - 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, - 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, - 1799, 1800, 1801, 1803, 268, 530, 576, 1802, - 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, - 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, - 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, - 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, - 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, - 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, - 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 
1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, - 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, - 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, - 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, - 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, - 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, - 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, - 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, - 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, - 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, - 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, - 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, - 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, - 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, - 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, - 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, - 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, - 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, - 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, - 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, - 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, - 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, - 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, - 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, - 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, - 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, - 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, - 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, - 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, - 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, - 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, - 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, - 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, - 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, - 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, - 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, - 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, - 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, - 2021, 1982, -} - -var _graphclust_trans_targs []int16 = []int16{ - 1974, 0, 1974, 1975, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 66, 68, 70, - 71, 72, 1976, 69, 74, 75, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 89, 90, 91, 93, 94, 96, - 102, 125, 130, 132, 139, 143, 97, 98, - 99, 100, 101, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, - 124, 126, 127, 128, 129, 131, 133, 134, - 135, 136, 137, 138, 140, 141, 142, 144, - 291, 292, 1977, 158, 159, 160, 161, 162, - 163, 164, 165, 166, 167, 168, 169, 170, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 
210, 211, 212, - 213, 214, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 228, 229, 230, - 231, 232, 234, 235, 237, 243, 267, 271, - 273, 280, 284, 238, 239, 240, 241, 242, - 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 268, - 269, 270, 272, 274, 275, 276, 277, 278, - 279, 281, 282, 283, 285, 287, 288, 289, - 145, 290, 146, 294, 295, 296, 2, 297, - 3, 1974, 1978, 1974, 1979, 315, 316, 317, - 318, 319, 320, 321, 322, 323, 324, 325, - 326, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 342, 344, 345, 346, 347, 348, 349, 350, - 351, 352, 353, 354, 355, 356, 357, 358, - 359, 360, 361, 362, 363, 364, 366, 368, - 370, 371, 372, 1980, 369, 374, 375, 377, - 378, 379, 380, 381, 382, 383, 384, 385, - 386, 387, 388, 389, 390, 391, 393, 394, - 396, 402, 425, 430, 432, 439, 443, 397, - 398, 399, 400, 401, 403, 404, 405, 406, - 407, 408, 409, 410, 411, 412, 413, 414, - 415, 416, 417, 418, 419, 420, 421, 422, - 423, 424, 426, 427, 428, 429, 431, 433, - 434, 435, 436, 437, 438, 440, 441, 442, - 444, 591, 592, 1981, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 467, 468, 469, - 470, 471, 472, 473, 474, 475, 476, 477, - 478, 479, 480, 481, 482, 483, 484, 485, - 486, 488, 489, 490, 491, 492, 493, 494, - 495, 496, 497, 498, 499, 500, 501, 502, - 503, 504, 505, 506, 507, 508, 510, 511, - 512, 513, 514, 516, 517, 519, 520, 521, - 522, 523, 524, 525, 526, 527, 528, 529, - 530, 531, 532, 534, 535, 537, 543, 567, - 571, 573, 580, 584, 538, 539, 540, 541, - 542, 544, 545, 546, 547, 548, 549, 550, - 551, 552, 553, 554, 555, 556, 557, 558, - 559, 560, 561, 562, 563, 564, 565, 566, - 568, 569, 570, 572, 574, 575, 576, 577, - 578, 579, 581, 582, 583, 585, 587, 588, - 589, 445, 590, 446, 594, 595, 596, 302, - 597, 303, 599, 605, 606, 608, 610, 613, - 616, 640, 1982, 622, 1983, 612, 1984, 615, - 618, 620, 621, 624, 625, 629, 630, 631, - 632, 633, 634, 635, 1985, 628, 639, 642, - 643, 644, 645, 646, 649, 650, 651, 652, - 653, 654, 655, 656, 660, 661, 663, 664, - 647, 666, 669, 671, 673, 667, 668, 670, - 672, 674, 678, 679, 680, 681, 682, 683, - 684, 685, 686, 687, 1986, 676, 677, 690, - 691, 299, 695, 696, 698, 997, 1000, 1003, - 1027, 1974, 1987, 1974, 1988, 712, 713, 714, - 715, 716, 717, 718, 719, 720, 721, 722, - 723, 724, 725, 726, 727, 728, 729, 730, - 731, 732, 733, 734, 735, 736, 737, 738, - 739, 741, 742, 743, 744, 745, 746, 747, - 748, 749, 750, 751, 752, 753, 754, 755, - 756, 757, 758, 759, 760, 761, 763, 765, - 767, 768, 769, 1989, 766, 771, 772, 774, - 775, 776, 777, 778, 779, 780, 781, 782, - 783, 784, 785, 786, 787, 788, 790, 791, - 793, 799, 822, 827, 829, 836, 840, 794, - 795, 796, 797, 798, 800, 801, 802, 803, - 804, 805, 806, 807, 808, 809, 810, 811, - 812, 813, 814, 815, 816, 817, 818, 819, - 820, 821, 823, 824, 825, 826, 828, 830, - 831, 832, 833, 834, 835, 837, 838, 839, - 841, 988, 989, 1990, 855, 856, 857, 858, - 859, 860, 861, 862, 863, 864, 865, 866, - 867, 868, 869, 870, 871, 872, 873, 874, - 875, 876, 877, 878, 879, 880, 881, 882, - 883, 885, 886, 887, 888, 889, 890, 891, - 892, 893, 894, 895, 896, 897, 898, 899, - 900, 901, 902, 903, 904, 905, 907, 908, - 909, 910, 911, 913, 914, 916, 917, 918, - 919, 920, 921, 922, 923, 924, 925, 926, - 927, 928, 929, 931, 932, 934, 940, 964, - 968, 970, 977, 981, 935, 936, 937, 938, - 939, 941, 942, 943, 944, 945, 946, 947, - 948, 949, 950, 951, 952, 953, 954, 955, - 956, 957, 958, 959, 960, 961, 962, 963, - 965, 966, 967, 969, 971, 972, 973, 
974, - 975, 976, 978, 979, 980, 982, 984, 985, - 986, 842, 987, 843, 991, 992, 993, 699, - 994, 700, 1009, 1991, 999, 1992, 1002, 1005, - 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, - 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, - 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, - 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, - 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, - 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, - 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, - 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, - 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, - 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, - 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, - 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, - 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, - 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, - 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, - 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, - 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, - 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, - 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, - 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, - 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, - 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, - 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, - 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, - 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, - 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, - 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, - 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, - 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, - 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, - 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, - 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, - 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, - 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, - 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, - 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, - 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, - 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, - 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, - 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, - 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, - 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, - 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, - 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, - 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, - 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, - 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, - 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, - 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, - 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, - 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, - 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, - 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, - 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, - 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, - 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, - 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, - 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, - 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, - 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, - 1330, 1630, 
1631, 1632, 1633, 1634, 1635, 1636, - 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, - 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, - 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, - 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, - 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, - 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, - 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, - 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, - 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, - 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, - 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, - 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, - 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, - 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, - 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, - 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, - 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, - 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, - 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, - 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, - 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, - 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, - 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, - 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, - 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, - 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, - 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, - 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, - 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, - 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, - 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, - 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, - 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, - 1973, 1974, 1, 1975, 299, 300, 301, 692, - 693, 694, 697, 1028, 1628, 1629, 1638, 1639, - 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, - 14, 43, 65, 73, 76, 92, 298, 293, - 67, 95, 147, 148, 149, 150, 151, 152, - 153, 154, 155, 156, 157, 187, 209, 215, - 218, 233, 236, 286, 1974, 600, 601, 602, - 603, 604, 607, 641, 648, 657, 658, 659, - 662, 665, 688, 689, 304, 305, 306, 307, - 308, 309, 310, 311, 312, 313, 314, 343, - 365, 373, 376, 392, 598, 593, 367, 395, - 447, 448, 449, 450, 451, 452, 453, 454, - 455, 456, 457, 487, 509, 515, 518, 533, - 536, 586, 609, 623, 636, 637, 638, 611, - 619, 614, 617, 626, 627, 675, 1974, 701, - 702, 703, 704, 705, 706, 707, 708, 709, - 710, 711, 996, 762, 770, 1010, 1023, 1024, - 1025, 789, 995, 990, 740, 773, 764, 792, - 844, 845, 846, 847, 848, 849, 850, 851, - 852, 853, 854, 884, 906, 912, 915, 930, - 933, 983, 998, 1006, 1001, 1004, 1013, 1014, - 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, - 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, - 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, - 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, - 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, - 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, - 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, - 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, - 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, - 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, - 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, - 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, - 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, - 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, - 1866, 1872, 1875, 1890, 1893, 1943, -} - 
-var _graphclust_trans_actions []byte = []byte{ - 31, 0, 27, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 34, 40, 25, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 40, 0, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 29, 51, 17, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 51, 0, 51, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 40, 21, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 40, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 19, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 40, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 23, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 43, 1, 47, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 15, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 13, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 9, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 7, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 11, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, -} - -var _graphclust_to_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 
- 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 37, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_from_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -} - -var _graphclust_eof_trans []int16 = []int16{ - 0, 0, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 0, 0, 0, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 
268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 0, 0, 0, 0, - 0, 0, 610, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 610, 612, 612, - 610, 612, 612, 610, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 610, 612, - 612, 612, 612, 0, 0, 0, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 
901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 0, - 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1750, - 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, - 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, - 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, - 1983, 1983, 1983, 1983, -} - -const graphclust_start int = 1974 -const graphclust_first_final int = 1974 -const graphclust_error int = 0 - -const graphclust_en_main int = 1974 - - -// line 14 "grapheme_clusters.rl" - - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - - -// line 4976 "grapheme_clusters.go" - { - cs = graphclust_start - ts = 0 - te = 0 - act = 0 - } - -// line 4984 "grapheme_clusters.go" - { - var _klen int - var _trans int - var _acts int - var _nacts uint - var _keys int - if p == pe { - goto _test_eof - } - if cs == 0 { - goto _out - } -_resume: - _acts = int(_graphclust_from_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts - 1] { - case 4: -// line 1 "NONE" - -ts = p - -// line 5008 "grapheme_clusters.go" - } - } - - _keys = int(_graphclust_key_offsets[cs]) - _trans = int(_graphclust_index_offsets[cs]) - - _klen = int(_graphclust_single_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + _klen - 1) - for { - if _upper < _lower { - break - } - - _mid = _lower + ((_upper - _lower) >> 1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 1 - case data[p] > _graphclust_trans_keys[_mid]: - _lower = _mid + 1 - default: - _trans += int(_mid - int(_keys)) - goto _match - } - } - _keys += _klen - _trans += _klen - } - - _klen = int(_graphclust_range_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + (_klen << 1) - 2) - for { - if _upper < _lower { - break - } - - _mid = _lower + (((_upper - _lower) >> 1) & ^1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 2 - case data[p] > _graphclust_trans_keys[_mid + 1]: - _lower = _mid + 2 - default: - _trans += int((_mid - int(_keys)) >> 1) - goto _match - } - } - _trans += _klen - } - -_match: - _trans = int(_graphclust_indicies[_trans]) -_eof_trans: - cs = int(_graphclust_trans_targs[_trans]) - - if _graphclust_trans_actions[_trans] == 0 { - goto _again - } - - _acts = int(_graphclust_trans_actions[_trans]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 0: -// line 46 "grapheme_clusters.rl" - - - startPos = p - - case 1: -// line 50 "grapheme_clusters.rl" - - - endPos = p - - case 5: -// line 1 "NONE" - -te = p+1 - - case 6: -// line 54 "grapheme_clusters.rl" - -act = 3; - case 7: -// line 54 "grapheme_clusters.rl" - -te = p+1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 8: -// line 54 "grapheme_clusters.rl" - -te = p+1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 9: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 10: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 11: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 12: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 13: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 14: -// line 54 "grapheme_clusters.rl" - -te = p -p-- -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 15: -// line 54 
"grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 16: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 17: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 18: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 19: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 20: -// line 54 "grapheme_clusters.rl" - -p = (te) - 1 -{ - return endPos+1, data[startPos:endPos+1], nil - } - case 21: -// line 1 "NONE" - - switch act { - case 0: - {cs = 0 -goto _again -} - case 3: - {p = (te) - 1 - - return endPos+1, data[startPos:endPos+1], nil - } - } - -// line 5218 "grapheme_clusters.go" - } - } - -_again: - _acts = int(_graphclust_to_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]); _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 2: -// line 1 "NONE" - -ts = 0 - - case 3: -// line 1 "NONE" - -act = 0 - -// line 5238 "grapheme_clusters.go" - } - } - - if cs == 0 { - goto _out - } - p++ - if p != pe { - goto _resume - } - _test_eof: {} - if p == eof { - if _graphclust_eof_trans[cs] > 0 { - _trans = int(_graphclust_eof_trans[cs] - 1) - goto _eof_trans - } - } - - _out: {} - } - -// line 116 "grapheme_clusters.rl" - - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. - _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl deleted file mode 100644 index 003ffbf5..00000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl +++ /dev/null @@ -1,132 +0,0 @@ -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT -%%{ - # (except you are actually in grapheme_clusters.rl here, so edit away!) - - machine graphclust; - write data; -}%% - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - - %%{ - include GraphemeCluster "grapheme_clusters_table.rl"; - - action start { - startPos = p - } - - action end { - endPos = p - } - - action emit { - return endPos+1, data[startPos:endPos+1], nil - } - - ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; - AnyExtender = Extend | ZWJGlue | SpacingMark; - Extension = AnyExtender*; - ReplacementChar = (0xEF 0xBF 0xBD); - - CRLFSeq = CR LF; - ControlSeq = Control | ReplacementChar; - HangulSeq = ( - L+ (((LV? V+ | LVT) T*)?|LV?) | - LV V* T* | - V+ T* | - LVT T* | - T+ - ) Extension; - EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; - ZWJSeq = ZWJGlue Extension; - EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; - - UTF8Cont = 0x80 .. 0xBF; - AnyUTF8 = ( - 0x00..0x7F | - 0xC0..0xDF . UTF8Cont | - 0xE0..0xEF . UTF8Cont . UTF8Cont | - 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont - ); - - # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension - OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; - - # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break - PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; - - CRLFTok = CRLFSeq >start @end; - ControlTok = ControlSeq >start @end; - HangulTok = HangulSeq >start @end; - EmojiTok = EmojiSeq >start @end; - ZWJTok = ZWJSeq >start @end; - EmojiFlagTok = EmojiFlagSeq >start @end; - OtherTok = OtherSeq >start @end; - PrependTok = PrependSeq >start @end; - - main := |* - CRLFTok => emit; - ControlTok => emit; - HangulTok => emit; - EmojiTok => emit; - ZWJTok => emit; - EmojiFlagTok => emit; - PrependTok => emit; - OtherTok => emit; - - # any single valid UTF-8 character would also be valid per spec, - # but we'll handle that separately after the loop so we can deal - # with requesting more bytes if we're not at EOF. - *|; - - write init; - write exec; - }%% - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. 
- _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl deleted file mode 100644 index fb451182..00000000 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl +++ /dev/null @@ -1,1583 +0,0 @@ -# The following Ragel file was autogenerated with unicode2ragel.rb -# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -# -# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. -# -# To use this, make sure that your alphtype is set to byte, -# and that your input is in utf8. - -%%{ - machine GraphemeCluster; - - Prepend = - 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... - | 0xDB 0x9D #Cf ARABIC END OF AYAH - | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK - | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH - | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH - | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN - | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... - ; - - CR = - 0x0D #Cc - ; - - LF = - 0x0A #Cc - ; - - Control = - 0x00..0x09 #Cc [10] .. - | 0x0B..0x0C #Cc [2] .. - | 0x0E..0x1F #Cc [18] .. - | 0x7F #Cc [33] .. - | 0xC2 0x80..0x9F # - | 0xC2 0xAD #Cf SOFT HYPHEN - | 0xD8 0x9C #Cf ARABIC LETTER MARK - | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR - | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE - | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... - | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR - | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR - | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... - | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS - | 0xE2 0x81 0xA5 #Cn - | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... - | 0xED 0xA0 0x80..0xFF #Cs [2048] .... - | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... - | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... - | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... - | 0xF3 0xA0 0x80 0x80 #Cn - | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG - | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. - | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. - | 0xF3 0xA0 0x83 0x00..0xBF # - | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. -# -# This script uses the unicode spec to generate a Ragel state machine -# that recognizes unicode alphanumeric characters. It generates 5 -# character classes: uupper, ulower, ualpha, udigit, and ualnum. -# Currently supported encodings are UTF-8 [default] and UCS-4. -# -# Usage: unicode2ragel.rb [options] -# -e, --encoding [ucs4 | utf8] Data encoding -# -h, --help Show this message -# -# This script was originally written as part of the Ferret search -# engine library. 
-# -# Author: Rakan El-Khalil - -require 'optparse' -require 'open-uri' - -ENCODINGS = [ :utf8, :ucs4 ] -ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } -DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" -DEFAULT_MACHINE_NAME= "WChar" - -### -# Display vars & default option - -TOTAL_WIDTH = 80 -RANGE_WIDTH = 23 -@encoding = :utf8 -@chart_url = DEFAULT_CHART_URL -machine_name = DEFAULT_MACHINE_NAME -properties = [] -@output = $stdout - -### -# Option parsing - -cli_opts = OptionParser.new do |opts| - opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| - @encoding = o.downcase.to_sym - end - opts.on("-h", "--help", "Show this message") do - puts opts - exit - end - opts.on("-u", "--url URL", "URL to process") do |o| - @chart_url = o - end - opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| - machine_name = o - end - opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| - properties = o - end - opts.on("-o", "--output FILE", "output file") do |o| - @output = File.new(o, "w+") - end -end - -cli_opts.parse(ARGV) -unless ENCODINGS.member? @encoding - puts "Invalid encoding: #{@encoding}" - puts cli_opts - exit -end - -## -# Downloads the document at url and yields every alpha line's hex -# range and description. - -def each_alpha( url, property ) - open( url ) do |file| - file.each_line do |line| - next if line =~ /^#/; - next if line !~ /; #{property} #/; - - range, description = line.split(/;/) - range.strip! - description.gsub!(/.*#/, '').strip! - - if range =~ /\.\./ - start, stop = range.split '..' - else start = stop = range - end - - yield start.hex .. stop.hex, description - end - end -end - -### -# Formats to hex at minimum width - -def to_hex( n ) - r = "%0X" % n - r = "0#{r}" unless (r.length % 2).zero? - r -end - -### -# UCS4 is just a straight hex conversion of the unicode codepoint. - -def to_ucs4( range ) - rangestr = "0x" + to_hex(range.begin) - rangestr << "..0x" + to_hex(range.end) if range.begin != range.end - [ rangestr ] -end - -## -# 0x00 - 0x7f -> 0zzzzzzz[7] -# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] -# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] -# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] - -UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] - -def to_utf8_enc( n ) - r = 0 - if n <= 0x7f - r = n - elsif n <= 0x7ff - y = 0xc0 | (n >> 6) - z = 0x80 | (n & 0x3f) - r = y << 8 | z - elsif n <= 0xffff - x = 0xe0 | (n >> 12) - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = x << 16 | y << 8 | z - elsif n <= 0x10ffff - w = 0xf0 | (n >> 18) - x = 0x80 | (n >> 12) & 0x3f - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = w << 24 | x << 16 | y << 8 | z - end - - to_hex(r) -end - -def from_utf8_enc( n ) - n = n.hex - r = 0 - if n <= 0x7f - r = n - elsif n <= 0xdfff - y = (n >> 8) & 0x1f - z = n & 0x3f - r = y << 6 | z - elsif n <= 0xefffff - x = (n >> 16) & 0x0f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = x << 10 | y << 6 | z - elsif n <= 0xf7ffffff - w = (n >> 24) & 0x07 - x = (n >> 16) & 0x3f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = w << 18 | x << 12 | y << 6 | z - end - r -end - -### -# Given a range, splits it up into ranges that can be continuously -# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] -# This is not strictly needed since the current [5.1] unicode standard -# doesn't have ranges that straddle utf8 boundaries. This is included -# for completeness as there is no telling if that will ever change. 
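For reference, the `UTF8_BOUNDARIES` values above are the standard UTF-8 width breakpoints (1 byte up to U+007F, 2 up to U+07FF, 3 up to U+FFFF, 4 up to U+10FFFF). A quick cross-check in Go using only the standard library (illustration only, not part of the generator script):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// The last code point that fits in 1, 2, 3 and 4 UTF-8 bytes respectively.
	for _, r := range []rune{0x7F, 0x7FF, 0xFFFF, 0x10FFFF} {
		fmt.Printf("U+%06X -> %d byte(s)\n", r, utf8.RuneLen(r))
	}
}
```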
- -def utf8_ranges( range ) - ranges = [] - UTF8_BOUNDARIES.each do |max| - if range.begin <= max - if range.end <= max - ranges << range - return ranges - end - - ranges << (range.begin .. max) - range = (max + 1) .. range.end - end - end - ranges -end - -def build_range( start, stop ) - size = start.size/2 - left = size - 1 - return [""] if size < 1 - - a = start[0..1] - b = stop[0..1] - - ### - # Shared prefix - - if a == b - return build_range(start[2..-1], stop[2..-1]).map do |elt| - "0x#{a} " + elt - end - end - - ### - # Unshared prefix, end of run - - return ["0x#{a}..0x#{b} "] if left.zero? - - ### - # Unshared prefix, not end of run - # Range can be 0x123456..0x56789A - # Which is equivalent to: - # 0x123456 .. 0x12FFFF - # 0x130000 .. 0x55FFFF - # 0x560000 .. 0x56789A - - ret = [] - ret << build_range(start, a + "FF" * left) - - ### - # Only generate middle range if need be. - - if a.hex+1 != b.hex - max = to_hex(b.hex - 1) - max = "FF" if b == "FF" - ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left - end - - ### - # Don't generate last range if it is covered by first range - - ret << build_range(b + "00" * left, stop) unless b == "FF" - ret.flatten! -end - -def to_utf8( range ) - utf8_ranges( range ).map do |r| - begin_enc = to_utf8_enc(r.begin) - end_enc = to_utf8_enc(r.end) - build_range begin_enc, end_enc - end.flatten! -end - -## -# Perform a 3-way comparison of the number of codepoints advertised by -# the unicode spec for the given range, the originally parsed range, -# and the resulting utf8 encoded range. - -def count_codepoints( code ) - code.split(' ').inject(1) do |acc, elt| - if elt =~ /0x(.+)\.\.0x(.+)/ - if @encoding == :utf8 - acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) - else - acc * ($2.hex - $1.hex + 1) - end - else - acc - end - end -end - -def is_valid?( range, desc, codes ) - spec_count = 1 - spec_count = $1.to_i if desc =~ /\[(\d+)\]/ - range_count = range.end - range.begin + 1 - - sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } - sum == spec_count and sum == range_count -end - -## -# Generate the state maching to stdout - -def generate_machine( name, property ) - pipe = " " - @output.puts " #{name} = " - each_alpha( @chart_url, property ) do |range, desc| - - codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) - - #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless - # is_valid? range, desc, codes - - range_width = codes.map { |a| a.size }.max - range_width = RANGE_WIDTH if range_width < RANGE_WIDTH - - desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 - desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH - - if desc.size > desc_width - desc = desc[0..desc_width - 4] + "..." - end - - codes.each_with_index do |r, idx| - desc = "" unless idx.zero? - code = "%-#{range_width}s" % r - @output.puts " #{pipe} #{code} ##{desc}" - pipe = "|" - end - end - @output.puts " ;" - @output.puts "" -end - -@output.puts < - - -### Backers - -Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Thank you to all our sponsors! 
(please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE deleted file mode 100644 index 2f9a31fa..00000000 --- a/vendor/github.com/asaskevich/govalidator/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Alex Saskevich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md deleted file mode 100644 index 40f9a878..00000000 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ /dev/null @@ -1,507 +0,0 @@ -govalidator -=========== -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) - -A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). - -#### Installation -Make sure that Go is installed on your computer. 
-Type the following command in your terminal: - - go get github.com/asaskevich/govalidator - -or you can get specified release of the package with `gopkg.in`: - - go get gopkg.in/asaskevich/govalidator.v4 - -After it the package is ready to use. - - -#### Import package in your project -Add following line in your `*.go` file: -```go -import "github.com/asaskevich/govalidator" -``` -If you are unhappy to use long `govalidator`, you can do something like this: -```go -import ( - valid "github.com/asaskevich/govalidator" -) -``` - -#### Activate behavior to require all fields have a validation tag by default -`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. - -`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. - -```go -import "github.com/asaskevich/govalidator" - -func init() { - govalidator.SetFieldsRequiredByDefault(true) -} -``` - -Here's some code to explain it: -```go -// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -type exampleStruct struct { - Name string `` - Email string `valid:"email"` -} - -// this, however, will only fail when Email is empty or an invalid email address: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email"` -} - -// lastly, this will only fail when Email is an invalid email address but not when it's empty: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email,optional"` -} -``` - -#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) -##### Custom validator function signature -A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. -```go -import "github.com/asaskevich/govalidator" - -// old signature -func(i interface{}) bool - -// new signature -func(i interface{}, o interface{}) bool -``` - -##### Adding a custom validator -This was changed to prevent data races when accessing custom validators. -```go -import "github.com/asaskevich/govalidator" - -// before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool { - // ... -}) - -// after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { - // ... 
-})) -``` - -#### List of functions: -```go -func Abs(value float64) float64 -func BlackList(str, chars string) string -func ByteLength(str string, params ...string) bool -func CamelCaseToUnderscore(str string) string -func Contains(str, substring string) bool -func Count(array []interface{}, iterator ConditionIterator) int -func Each(array []interface{}, iterator Iterator) -func ErrorByField(e error, field string) string -func ErrorsByField(e error) map[string]string -func Filter(array []interface{}, iterator ConditionIterator) []interface{} -func Find(array []interface{}, iterator ConditionIterator) interface{} -func GetLine(s string, index int) (string, error) -func GetLines(s string) []string -func InRange(value, left, right float64) bool -func IsASCII(str string) bool -func IsAlpha(str string) bool -func IsAlphanumeric(str string) bool -func IsBase64(str string) bool -func IsByteLength(str string, min, max int) bool -func IsCIDR(str string) bool -func IsCreditCard(str string) bool -func IsDNSName(str string) bool -func IsDataURI(str string) bool -func IsDialString(str string) bool -func IsDivisibleBy(str, num string) bool -func IsEmail(str string) bool -func IsFilePath(str string) (bool, int) -func IsFloat(str string) bool -func IsFullWidth(str string) bool -func IsHalfWidth(str string) bool -func IsHexadecimal(str string) bool -func IsHexcolor(str string) bool -func IsHost(str string) bool -func IsIP(str string) bool -func IsIPv4(str string) bool -func IsIPv6(str string) bool -func IsISBN(str string, version int) bool -func IsISBN10(str string) bool -func IsISBN13(str string) bool -func IsISO3166Alpha2(str string) bool -func IsISO3166Alpha3(str string) bool -func IsISO693Alpha2(str string) bool -func IsISO693Alpha3b(str string) bool -func IsISO4217(str string) bool -func IsIn(str string, params ...string) bool -func IsInt(str string) bool -func IsJSON(str string) bool -func IsLatitude(str string) bool -func IsLongitude(str string) bool -func IsLowerCase(str string) bool -func IsMAC(str string) bool -func IsMongoID(str string) bool -func IsMultibyte(str string) bool -func IsNatural(value float64) bool -func IsNegative(value float64) bool -func IsNonNegative(value float64) bool -func IsNonPositive(value float64) bool -func IsNull(str string) bool -func IsNumeric(str string) bool -func IsPort(str string) bool -func IsPositive(value float64) bool -func IsPrintableASCII(str string) bool -func IsRFC3339(str string) bool -func IsRFC3339WithoutZone(str string) bool -func IsRGBcolor(str string) bool -func IsRequestURI(rawurl string) bool -func IsRequestURL(rawurl string) bool -func IsSSN(str string) bool -func IsSemver(str string) bool -func IsTime(str string, format string) bool -func IsURL(str string) bool -func IsUTFDigit(str string) bool -func IsUTFLetter(str string) bool -func IsUTFLetterNumeric(str string) bool -func IsUTFNumeric(str string) bool -func IsUUID(str string) bool -func IsUUIDv3(str string) bool -func IsUUIDv4(str string) bool -func IsUUIDv5(str string) bool -func IsUpperCase(str string) bool -func IsVariableWidth(str string) bool -func IsWhole(value float64) bool -func LeftTrim(str, chars string) string -func Map(array []interface{}, iterator ResultIterator) []interface{} -func Matches(str, pattern string) bool -func NormalizeEmail(str string) (string, error) -func PadBoth(str string, padStr string, padLen int) string -func PadLeft(str string, padStr string, padLen int) string -func PadRight(str string, padStr string, padLen int) string -func Range(str string, params 
...string) bool -func RemoveTags(s string) string -func ReplacePattern(str, pattern, replace string) string -func Reverse(s string) string -func RightTrim(str, chars string) string -func RuneLength(str string, params ...string) bool -func SafeFileName(str string) string -func SetFieldsRequiredByDefault(value bool) -func Sign(value float64) float64 -func StringLength(str string, params ...string) bool -func StringMatches(s string, params ...string) bool -func StripLow(str string, keepNewLines bool) string -func ToBoolean(str string) (bool, error) -func ToFloat(str string) (float64, error) -func ToInt(str string) (int64, error) -func ToJSON(obj interface{}) (string, error) -func ToString(obj interface{}) string -func Trim(str, chars string) string -func Truncate(str string, length int, ending string) string -func UnderscoreToCamelCase(s string) string -func ValidateStruct(s interface{}) (bool, error) -func WhiteList(str, chars string) string -type ConditionIterator -type CustomTypeValidator -type Error -func (e Error) Error() string -type Errors -func (es Errors) Error() string -func (es Errors) Errors() []error -type ISO3166Entry -type Iterator -type ParamValidator -type ResultIterator -type UnsupportedTypeError -func (e *UnsupportedTypeError) Error() string -type Validator -``` - -#### Examples -###### IsURL -```go -println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) -``` -###### ToString -```go -type User struct { - FirstName string - LastName string -} - -str := govalidator.ToString(&User{"John", "Juan"}) -println(str) -``` -###### Each, Map, Filter, Count for slices -Each iterates over the slice/array and calls Iterator for every item -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.Iterator = func(value interface{}, index int) { - println(value.(int)) -} -govalidator.Each(data, fn) -``` -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { - return value.(int) * 3 -} -_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} -``` -```go -data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} -var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { - return value.(int)%2 == 0 -} -_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} -_ = govalidator.Count(data, fn) // result = 5 -``` -###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) -If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: -```go -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) -``` -For completely custom validators (interface-based), see below. 
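Before the full list, here is a minimal, self-contained sketch of tag-based validation; the struct and field names are illustrative only and do not come from the README:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// Server uses built-in tag validators from the list below.
type Server struct {
	Hostname string `valid:"host"`
	Addr     string `valid:"ipv4"`
	Owner    string `valid:"email,optional"` // optional: an empty value is allowed
}

func main() {
	ok, err := govalidator.ValidateStruct(Server{
		Hostname: "example.com",
		Addr:     "10.0.0.1",
	})
	fmt.Println(ok, err) // true <nil> when every tagged field validates
}
```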
- -Here is a list of available validators for struct fields (validator - used function): -```go -"email": IsEmail, -"url": IsURL, -"dialstring": IsDialString, -"requrl": IsRequestURL, -"requri": IsRequestURI, -"alpha": IsAlpha, -"utfletter": IsUTFLetter, -"alphanum": IsAlphanumeric, -"utfletternum": IsUTFLetterNumeric, -"numeric": IsNumeric, -"utfnumeric": IsUTFNumeric, -"utfdigit": IsUTFDigit, -"hexadecimal": IsHexadecimal, -"hexcolor": IsHexcolor, -"rgbcolor": IsRGBcolor, -"lowercase": IsLowerCase, -"uppercase": IsUpperCase, -"int": IsInt, -"float": IsFloat, -"null": IsNull, -"uuid": IsUUID, -"uuidv3": IsUUIDv3, -"uuidv4": IsUUIDv4, -"uuidv5": IsUUIDv5, -"creditcard": IsCreditCard, -"isbn10": IsISBN10, -"isbn13": IsISBN13, -"json": IsJSON, -"multibyte": IsMultibyte, -"ascii": IsASCII, -"printableascii": IsPrintableASCII, -"fullwidth": IsFullWidth, -"halfwidth": IsHalfWidth, -"variablewidth": IsVariableWidth, -"base64": IsBase64, -"datauri": IsDataURI, -"ip": IsIP, -"port": IsPort, -"ipv4": IsIPv4, -"ipv6": IsIPv6, -"dns": IsDNSName, -"host": IsHost, -"mac": IsMAC, -"latitude": IsLatitude, -"longitude": IsLongitude, -"ssn": IsSSN, -"semver": IsSemver, -"rfc3339": IsRFC3339, -"rfc3339WithoutZone": IsRFC3339WithoutZone, -"ISO3166Alpha2": IsISO3166Alpha2, -"ISO3166Alpha3": IsISO3166Alpha3, -``` -Validators with parameters - -```go -"range(min|max)": Range, -"length(min|max)": ByteLength, -"runelength(min|max)": RuneLength, -"stringlength(min|max)": StringLength, -"matches(pattern)": StringMatches, -"in(string1|string2|...|stringN)": IsIn, -"rsapub(keylength)" : IsRsaPub, -``` - -And here is small example of usage: -```go -type Post struct { - Title string `valid:"alphanum,required"` - Message string `valid:"duck,ascii"` - Message2 string `valid:"animal(dog)"` - AuthorIP string `valid:"ipv4"` - Date string `valid:"-"` -} -post := &Post{ - Title: "My Example Post", - Message: "duck", - Message2: "dog", - AuthorIP: "123.234.54.3", -} - -// Add your own struct validation tags -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) - -// Add your own struct validation tags with parameter -govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { - species := params[0] - return str == species -}) -govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") - -result, err := govalidator.ValidateStruct(post) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` -###### WhiteList -```go -// Remove all characters from string ignoring characters between "a" and "z" -println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") -``` - -###### Custom validation functions -Custom validation using your own domain specific validators is also available - here's an example of how to use it: -```go -import "github.com/asaskevich/govalidator" - -type CustomByteArray [6]byte // custom types are supported and can be validated - -type StructWithCustomByteArray struct { - ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence - Email string `valid:"email"` - CustomMinLength int `valid:"-"` -} - -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool { - switch v := context.(type) { // you can type switch on the context interface being validated - case 
StructWithCustomByteArray: - // you can check and validate against some other field in the context, - // return early or not validate against the context at all – your choice - case SomeOtherType: - // ... - default: - // expecting some other type? Throw/panic here or continue - } - - switch v := i.(type) { // type switch on the struct field being validated - case CustomByteArray: - for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes - if e != 0 { - return true - } - } - } - return false -})) -govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool { - switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation - case StructWithCustomByteArray: - return len(v.ID) >= v.CustomMinLength - } - return false -})) -``` - -###### Custom error messages -Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: -```go -type Ticket struct { - Id int64 `json:"id"` - FirstName string `json:"firstname" valid:"required~First name is blank"` -} -``` - -#### Notes -Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). -Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). - -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. - -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Credits -### Contributors - -This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. 
- -#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) -* [Daniel Lohse](https://github.com/annismckenzie) -* [Attila Oláh](https://github.com/attilaolah) -* [Daniel Korner](https://github.com/Dadie) -* [Steven Wilkin](https://github.com/stevenwilkin) -* [Deiwin Sarjas](https://github.com/deiwin) -* [Noah Shibley](https://github.com/slugmobile) -* [Nathan Davies](https://github.com/nathj07) -* [Matt Sanford](https://github.com/mzsanford) -* [Simon ccl1115](https://github.com/ccl1115) - - - - -### Backers - -Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)] - - - - - - - - - - - - - - - -## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go deleted file mode 100644 index 5bace265..00000000 --- a/vendor/github.com/asaskevich/govalidator/arrays.go +++ /dev/null @@ -1,58 +0,0 @@ -package govalidator - -// Iterator is the function that accepts element of slice/array and its index -type Iterator func(interface{}, int) - -// ResultIterator is the function that accepts element of slice/array and its index and returns any result -type ResultIterator func(interface{}, int) interface{} - -// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean -type ConditionIterator func(interface{}, int) bool - -// Each iterates over the slice and apply Iterator to every item -func Each(array []interface{}, iterator Iterator) { - for index, data := range array { - iterator(data, index) - } -} - -// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. -func Map(array []interface{}, iterator ResultIterator) []interface{} { - var result = make([]interface{}, len(array)) - for index, data := range array { - result[index] = iterator(data, index) - } - return result -} - -// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. -func Find(array []interface{}, iterator ConditionIterator) interface{} { - for index, data := range array { - if iterator(data, index) { - return data - } - } - return nil -} - -// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. -func Filter(array []interface{}, iterator ConditionIterator) []interface{} { - var result = make([]interface{}, 0) - for index, data := range array { - if iterator(data, index) { - result = append(result, data) - } - } - return result -} - -// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. 
-func Count(array []interface{}, iterator ConditionIterator) int { - count := 0 - for index, data := range array { - if iterator(data, index) { - count = count + 1 - } - } - return count -} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go deleted file mode 100644 index cf1e5d56..00000000 --- a/vendor/github.com/asaskevich/govalidator/converter.go +++ /dev/null @@ -1,64 +0,0 @@ -package govalidator - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" -) - -// ToString convert the input to a string. -func ToString(obj interface{}) string { - res := fmt.Sprintf("%v", obj) - return string(res) -} - -// ToJSON convert the input to a valid JSON string -func ToJSON(obj interface{}) (string, error) { - res, err := json.Marshal(obj) - if err != nil { - res = []byte("") - } - return string(res), err -} - -// ToFloat convert the input string to a float, or 0.0 if the input is not a float. -func ToFloat(str string) (float64, error) { - res, err := strconv.ParseFloat(str, 64) - if err != nil { - res = 0.0 - } - return res, err -} - -// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. -func ToInt(value interface{}) (res int64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = val.Int() - case uint, uint8, uint16, uint32, uint64: - res = int64(val.Uint()) - case string: - if IsInt(val.String()) { - res, err = strconv.ParseInt(val.String(), 0, 64) - if err != nil { - res = 0 - } - } else { - err = fmt.Errorf("math: square root of negative number %g", value) - res = 0 - } - default: - err = fmt.Errorf("math: square root of negative number %g", value) - res = 0 - } - - return -} - -// ToBoolean convert the input string to a boolean. -func ToBoolean(str string) (bool, error) { - return strconv.ParseBool(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go deleted file mode 100644 index 655b750c..00000000 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ /dev/null @@ -1,43 +0,0 @@ -package govalidator - -import "strings" - -// Errors is an array of multiple errors and conforms to the error interface. -type Errors []error - -// Errors returns itself. -func (es Errors) Errors() []error { - return es -} - -func (es Errors) Error() string { - var errs []string - for _, e := range es { - errs = append(errs, e.Error()) - } - return strings.Join(errs, ";") -} - -// Error encapsulates a name, an error and whether there's a custom error message or not. 
-type Error struct { - Name string - Err error - CustomErrorMessageExists bool - - // Validator indicates the name of the validator that failed - Validator string - Path []string -} - -func (e Error) Error() string { - if e.CustomErrorMessageExists { - return e.Err.Error() - } - - errName := e.Name - if len(e.Path) > 0 { - errName = strings.Join(append(e.Path, e.Name), ".") - } - - return errName + ": " + e.Err.Error() -} diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go deleted file mode 100644 index 7e6c652e..00000000 --- a/vendor/github.com/asaskevich/govalidator/numerics.go +++ /dev/null @@ -1,97 +0,0 @@ -package govalidator - -import ( - "math" - "reflect" -) - -// Abs returns absolute value of number -func Abs(value float64) float64 { - return math.Abs(value) -} - -// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise -func Sign(value float64) float64 { - if value > 0 { - return 1 - } else if value < 0 { - return -1 - } else { - return 0 - } -} - -// IsNegative returns true if value < 0 -func IsNegative(value float64) bool { - return value < 0 -} - -// IsPositive returns true if value > 0 -func IsPositive(value float64) bool { - return value > 0 -} - -// IsNonNegative returns true if value >= 0 -func IsNonNegative(value float64) bool { - return value >= 0 -} - -// IsNonPositive returns true if value <= 0 -func IsNonPositive(value float64) bool { - return value <= 0 -} - -// InRange returns true if value lies between left and right border -func InRangeInt(value, left, right interface{}) bool { - value64, _ := ToInt(value) - left64, _ := ToInt(left) - right64, _ := ToInt(right) - if left64 > right64 { - left64, right64 = right64, left64 - } - return value64 >= left64 && value64 <= right64 -} - -// InRange returns true if value lies between left and right border -func InRangeFloat32(value, left, right float32) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRange returns true if value lies between left and right border -func InRangeFloat64(value, left, right float64) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRange returns true if value lies between left and right border, generic type to handle int, float32 or float64, all types must the same type -func InRange(value interface{}, left interface{}, right interface{}) bool { - - reflectValue := reflect.TypeOf(value).Kind() - reflectLeft := reflect.TypeOf(left).Kind() - reflectRight := reflect.TypeOf(right).Kind() - - if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int { - return InRangeInt(value.(int), left.(int), right.(int)) - } else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 { - return InRangeFloat32(value.(float32), left.(float32), right.(float32)) - } else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 { - return InRangeFloat64(value.(float64), left.(float64), right.(float64)) - } else { - return false - } -} - -// IsWhole returns true if value is whole number -func IsWhole(value float64) bool { - return math.Remainder(value, 1) == 0 -} - -// IsNatural returns true if value is natural number (positive and whole) -func IsNatural(value float64) bool { - return IsWhole(value) && IsPositive(value) -} diff --git 
a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go deleted file mode 100644 index 61a05d43..00000000 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ /dev/null @@ -1,101 +0,0 @@ -package govalidator - -import "regexp" - -// Basic regular expressions for validating strings -const ( - Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$" - ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" - ISBN13 string = "^(?:[0-9]{13})$" - UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - Alpha string = "^[a-zA-Z]+$" - Alphanumeric string = "^[a-zA-Z0-9]+$" - Numeric string = "^[0-9]+$" - Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" - Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" - Hexadecimal string = "^[0-9a-fA-F]+$" - Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" - ASCII string = "^[\x00-\x7F]+$" - Multibyte string = "[^\x00-\x7F]" - FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - PrintableASCII string = "^[\x20-\x7E]+$" - DataURI string = "^data:.+\\/(.+);base64$" - Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` - IP string = 
`(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername string = `(\S+(:\S*)?@)` - URLPath string = `((\/|\?|#)[^\s]*)` - URLPort string = `(:(\d{1,5}))` - URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` - URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` - SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` - WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixPath string = `^(/[^/\x00]*)+/?$` - Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" - tagName string = "valid" - hasLowerCase string = ".*[[:lower:]]" - hasUpperCase string = ".*[[:upper:]]" - hasWhitespace string = ".*[[:space:]]" - hasWhitespaceOnly string = "^[[:space:]]+$" -) - -// Used by IsFilePath func -const ( - // Unknown is unresolved OS type - Unknown = iota - // Win is Windows type - Win - // Unix is *nix OS types - Unix -) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") - userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") - rxEmail = regexp.MustCompile(Email) - rxCreditCard = regexp.MustCompile(CreditCard) - rxISBN10 = regexp.MustCompile(ISBN10) - rxISBN13 = regexp.MustCompile(ISBN13) - rxUUID3 = regexp.MustCompile(UUID3) - rxUUID4 = regexp.MustCompile(UUID4) - rxUUID5 = regexp.MustCompile(UUID5) - rxUUID = regexp.MustCompile(UUID) - rxAlpha = regexp.MustCompile(Alpha) - rxAlphanumeric = regexp.MustCompile(Alphanumeric) - rxNumeric = regexp.MustCompile(Numeric) - rxInt = regexp.MustCompile(Int) - rxFloat = regexp.MustCompile(Float) - rxHexadecimal = regexp.MustCompile(Hexadecimal) - rxHexcolor = regexp.MustCompile(Hexcolor) - rxRGBcolor = regexp.MustCompile(RGBcolor) - rxASCII = regexp.MustCompile(ASCII) - rxPrintableASCII = regexp.MustCompile(PrintableASCII) - rxMultibyte = regexp.MustCompile(Multibyte) - rxFullWidth = regexp.MustCompile(FullWidth) - rxHalfWidth = regexp.MustCompile(HalfWidth) - rxBase64 = regexp.MustCompile(Base64) - rxDataURI = regexp.MustCompile(DataURI) - rxLatitude = regexp.MustCompile(Latitude) - rxLongitude = regexp.MustCompile(Longitude) - rxDNSName = regexp.MustCompile(DNSName) - rxURL = regexp.MustCompile(URL) - rxSSN = regexp.MustCompile(SSN) - rxWinPath = regexp.MustCompile(WinPath) - rxUnixPath = 
regexp.MustCompile(UnixPath) - rxSemver = regexp.MustCompile(Semver) - rxHasLowerCase = regexp.MustCompile(hasLowerCase) - rxHasUpperCase = regexp.MustCompile(hasUpperCase) - rxHasWhitespace = regexp.MustCompile(hasWhitespace) - rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) -) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go deleted file mode 100644 index 4f7e9274..00000000 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ /dev/null @@ -1,636 +0,0 @@ -package govalidator - -import ( - "reflect" - "regexp" - "sort" - "sync" -) - -// Validator is a wrapper for a validator function that returns bool and accepts string. -type Validator func(str string) bool - -// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. -// The second parameter should be the context (in the case of validating a struct: the whole object being validated). -type CustomTypeValidator func(i interface{}, o interface{}) bool - -// ParamValidator is a wrapper for validator functions that accepts additional parameters. -type ParamValidator func(str string, params ...string) bool -type tagOptionsMap map[string]tagOption - -func (t tagOptionsMap) orderedKeys() []string { - var keys []string - for k := range t { - keys = append(keys, k) - } - - sort.Slice(keys, func(a, b int) bool { - return t[keys[a]].order < t[keys[b]].order - }) - - return keys -} - -type tagOption struct { - name string - customErrorMessage string - order int -} - -// UnsupportedTypeError is a wrapper for reflect.Type -type UnsupportedTypeError struct { - Type reflect.Type -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflect.Value - -// ParamTagMap is a map of functions accept variants parameters -var ParamTagMap = map[string]ParamValidator{ - "length": ByteLength, - "range": Range, - "runelength": RuneLength, - "stringlength": StringLength, - "matches": StringMatches, - "in": isInRaw, - "rsapub": IsRsaPub, -} - -// ParamTagRegexMap maps param tags to their respective regexes. -var ParamTagRegexMap = map[string]*regexp.Regexp{ - "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), - "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), - "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), - "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), - "in": regexp.MustCompile(`^in\((.*)\)`), - "matches": regexp.MustCompile(`^matches\((.+)\)$`), - "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), -} - -type customTypeTagMap struct { - validators map[string]CustomTypeValidator - - sync.RWMutex -} - -func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { - tm.RLock() - defer tm.RUnlock() - v, ok := tm.validators[name] - return v, ok -} - -func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { - tm.Lock() - defer tm.Unlock() - tm.validators[name] = ctv -} - -// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. -// Use this to validate compound or custom types that need to be handled as a whole, e.g. -// `type UUID [16]byte` (this would be handled as an array of bytes). -var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} - -// TagMap is a map of functions, that can be used as tags for ValidateStruct function. 
-var TagMap = map[string]Validator{ - "email": IsEmail, - "url": IsURL, - "dialstring": IsDialString, - "requrl": IsRequestURL, - "requri": IsRequestURI, - "alpha": IsAlpha, - "utfletter": IsUTFLetter, - "alphanum": IsAlphanumeric, - "utfletternum": IsUTFLetterNumeric, - "numeric": IsNumeric, - "utfnumeric": IsUTFNumeric, - "utfdigit": IsUTFDigit, - "hexadecimal": IsHexadecimal, - "hexcolor": IsHexcolor, - "rgbcolor": IsRGBcolor, - "lowercase": IsLowerCase, - "uppercase": IsUpperCase, - "int": IsInt, - "float": IsFloat, - "null": IsNull, - "uuid": IsUUID, - "uuidv3": IsUUIDv3, - "uuidv4": IsUUIDv4, - "uuidv5": IsUUIDv5, - "creditcard": IsCreditCard, - "isbn10": IsISBN10, - "isbn13": IsISBN13, - "json": IsJSON, - "multibyte": IsMultibyte, - "ascii": IsASCII, - "printableascii": IsPrintableASCII, - "fullwidth": IsFullWidth, - "halfwidth": IsHalfWidth, - "variablewidth": IsVariableWidth, - "base64": IsBase64, - "datauri": IsDataURI, - "ip": IsIP, - "port": IsPort, - "ipv4": IsIPv4, - "ipv6": IsIPv6, - "dns": IsDNSName, - "host": IsHost, - "mac": IsMAC, - "latitude": IsLatitude, - "longitude": IsLongitude, - "ssn": IsSSN, - "semver": IsSemver, - "rfc3339": IsRFC3339, - "rfc3339WithoutZone": IsRFC3339WithoutZone, - "ISO3166Alpha2": IsISO3166Alpha2, - "ISO3166Alpha3": IsISO3166Alpha3, - "ISO4217": IsISO4217, -} - -// ISO3166Entry stores country codes -type ISO3166Entry struct { - EnglishShortName string - FrenchShortName string - Alpha2Code string - Alpha3Code string - Numeric string -} - -//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" -var ISO3166List = []ISO3166Entry{ - {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, - {"Albania", "Albanie (l')", "AL", "ALB", "008"}, - {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, - {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, - {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, - {"Andorra", "Andorre (l')", "AD", "AND", "020"}, - {"Angola", "Angola (l')", "AO", "AGO", "024"}, - {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, - {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, - {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, - {"Australia", "Australie (l')", "AU", "AUS", "036"}, - {"Austria", "Autriche (l')", "AT", "AUT", "040"}, - {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, - {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, - {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, - {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, - {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, - {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, - {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, - {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, - {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, - {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, - {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, - {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, - {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, - {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, - {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, - {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, - {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, - {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, - {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, - {"Myanmar", "Myanmar (le)", "MM", "MMR", 
"104"}, - {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, - {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, - {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, - {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, - {"Canada", "Canada (le)", "CA", "CAN", "124"}, - {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, - {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, - {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, - {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, - {"Chad", "Tchad (le)", "TD", "TCD", "148"}, - {"Chile", "Chili (le)", "CL", "CHL", "152"}, - {"China", "Chine (la)", "CN", "CHN", "156"}, - {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, - {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, - {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, - {"Colombia", "Colombie (la)", "CO", "COL", "170"}, - {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, - {"Mayotte", "Mayotte", "YT", "MYT", "175"}, - {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, - {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, - {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, - {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, - {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, - {"Cuba", "Cuba", "CU", "CUB", "192"}, - {"Cyprus", "Chypre", "CY", "CYP", "196"}, - {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, - {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, - {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, - {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, - {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, - {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, - {"El Salvador", "El Salvador", "SV", "SLV", "222"}, - {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, - {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, - {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, - {"Estonia", "Estonie (l')", "EE", "EST", "233"}, - {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, - {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, - {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, - {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, - {"Finland", "Finlande (la)", "FI", "FIN", "246"}, - {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, - {"France", "France (la)", "FR", "FRA", "250"}, - {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, - {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, - {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, - {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, - {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, - {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, - {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, - {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, - {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, - {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, - {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, - {"Kiribati", "Kiribati", "KI", "KIR", "296"}, - {"Greece", "Grèce (la)", "GR", "GRC", "300"}, - {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, - {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, - {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, - 
{"Guam", "Guam", "GU", "GUM", "316"}, - {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, - {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, - {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, - {"Haiti", "Haïti", "HT", "HTI", "332"}, - {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, - {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, - {"Honduras", "Honduras (le)", "HN", "HND", "340"}, - {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, - {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, - {"Iceland", "Islande (l')", "IS", "ISL", "352"}, - {"India", "Inde (l')", "IN", "IND", "356"}, - {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, - {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, - {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, - {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, - {"Israel", "Israël", "IL", "ISR", "376"}, - {"Italy", "Italie (l')", "IT", "ITA", "380"}, - {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, - {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, - {"Japan", "Japon (le)", "JP", "JPN", "392"}, - {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, - {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, - {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, - {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, - {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, - {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, - {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, - {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, - {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, - {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, - {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, - {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, - {"Libya", "Libye (la)", "LY", "LBY", "434"}, - {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, - {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, - {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, - {"Macao", "Macao", "MO", "MAC", "446"}, - {"Madagascar", "Madagascar", "MG", "MDG", "450"}, - {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, - {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, - {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, - {"Mali", "Mali (le)", "ML", "MLI", "466"}, - {"Malta", "Malte", "MT", "MLT", "470"}, - {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, - {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, - {"Mauritius", "Maurice", "MU", "MUS", "480"}, - {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, - {"Monaco", "Monaco", "MC", "MCO", "492"}, - {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, - {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, - {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, - {"Montserrat", "Montserrat", "MS", "MSR", "500"}, - {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, - {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, - {"Oman", "Oman", "OM", "OMN", "512"}, - {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, - {"Nauru", "Nauru", "NR", "NRU", "520"}, - {"Nepal", "Népal (le)", "NP", "NPL", "524"}, - {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, - {"Curaçao", "Curaçao", "CW", "CUW", "531"}, - {"Aruba", "Aruba", "AW", "ABW", "533"}, - {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, - {"Bonaire, Sint Eustatius and Saba", "Bonaire, 
Saint-Eustache et Saba", "BQ", "BES", "535"}, - {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, - {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, - {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, - {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, - {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, - {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, - {"Niue", "Niue", "NU", "NIU", "570"}, - {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, - {"Norway", "Norvège (la)", "NO", "NOR", "578"}, - {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, - {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, - {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, - {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, - {"Palau", "Palaos (les)", "PW", "PLW", "585"}, - {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, - {"Panama", "Panama (le)", "PA", "PAN", "591"}, - {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, - {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, - {"Peru", "Pérou (le)", "PE", "PER", "604"}, - {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, - {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, - {"Poland", "Pologne (la)", "PL", "POL", "616"}, - {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, - {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, - {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, - {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, - {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, - {"Réunion", "Réunion (La)", "RE", "REU", "638"}, - {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, - {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, - {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, - {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, - {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, - {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, - {"Anguilla", "Anguilla", "AI", "AIA", "660"}, - {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, - {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, - {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, - {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, - {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, - {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, - {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, - {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, - {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, - {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, - {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, - {"Singapore", "Singapour", "SG", "SGP", "702"}, - {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, - {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, - {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, - {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, - {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, - {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, - {"Spain", "Espagne (l')", "ES", "ESP", "724"}, - {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, - {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, - {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, 
- {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, - {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, - {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, - {"Sweden", "Suède (la)", "SE", "SWE", "752"}, - {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, - {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, - {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, - {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, - {"Togo", "Togo (le)", "TG", "TGO", "768"}, - {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, - {"Tonga", "Tonga (les)", "TO", "TON", "776"}, - {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, - {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, - {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, - {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, - {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, - {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, - {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, - {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, - {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, - {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, - {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, - {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, - {"Guernsey", "Guernesey", "GG", "GGY", "831"}, - {"Jersey", "Jersey", "JE", "JEY", "832"}, - {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, - {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, - {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, - {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, - {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, - {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, - {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, - {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, - {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, - {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, - {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, - {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, -} - -// ISO4217List is the list of ISO currency codes -var ISO4217List = []string{ - "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", - "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", - "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", - "DJF", "DKK", "DOP", "DZD", - "EGP", "ERN", "ETB", "EUR", - "FJD", "FKP", - "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", - "HKD", "HNL", "HRK", "HTG", "HUF", - "IDR", "ILS", "INR", "IQD", "IRR", "ISK", - "JMD", "JOD", "JPY", - "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", - "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", - "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", - "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", - "OMR", - "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", - "QAR", - "RON", "RSD", "RUB", "RWF", - "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", - "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", - "UAH", "UGX", 
"USD", "USN", "UYI", "UYU", "UZS", - "VEF", "VND", "VUV", - "WST", - "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", - "YER", - "ZAR", "ZMW", "ZWL", -} - -// ISO693Entry stores ISO language codes -type ISO693Entry struct { - Alpha3bCode string - Alpha2Code string - English string -} - -//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json -var ISO693List = []ISO693Entry{ - {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, - {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, - {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, - {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, - {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, - {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, - {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, - {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, - {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, - {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, - {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, - {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, - {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, - {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, - {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, - {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, - {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, - {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, - {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, - {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, - {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, - {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, - {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, - {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, - {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, - {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, - {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, - {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, - {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, - {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, - {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, - {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, - {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, - {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, - {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, - {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, - {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, - {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, - {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, - {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, - {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, - {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, - {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, - {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, - {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, - {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, - {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, - {Alpha3bCode: "fry", Alpha2Code: 
"fy", English: "Western Frisian"}, - {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, - {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, - {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, - {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, - {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, - {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, - {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, - {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, - {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, - {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, - {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, - {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, - {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, - {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, - {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, - {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, - {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, - {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, - {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, - {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, - {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, - {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, - {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, - {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, - {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, - {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, - {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, - {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, - {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, - {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, - {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, - {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, - {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, - {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, - {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, - {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, - {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, - {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, - {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, - {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, - {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, - {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, - {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, - {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, - {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, - {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, - {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, - {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, - {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, - {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, - {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, - {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, - {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, - {Alpha3bCode: "mac", 
Alpha2Code: "mk", English: "Macedonian"}, - {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, - {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, - {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, - {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, - {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, - {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, - {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, - {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, - {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, - {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, - {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, - {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, - {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, - {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, - {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, - {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, - {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, - {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, - {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, - {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, - {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, - {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, - {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, - {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, - {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, - {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, - {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, - {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, - {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, - {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, - {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, - {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, - {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, - {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, - {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, - {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, - {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, - {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, - {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, - {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, - {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, - {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, - {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, - {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, - {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, - {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, - {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, - {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, - {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, - {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, - {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, - {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, - {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, - 
{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, - {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, - {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, - {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, - {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, - {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, - {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, - {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, - {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, - {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, - {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, - {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, - {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, - {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, - {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, - {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, - {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, - {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, - {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, - {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, - {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, - {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, - {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, - {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, - {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, - {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, - {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, - {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, - {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, -} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go deleted file mode 100644 index a0b706a7..00000000 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ /dev/null @@ -1,270 +0,0 @@ -package govalidator - -import ( - "errors" - "fmt" - "html" - "math" - "path" - "regexp" - "strings" - "unicode" - "unicode/utf8" -) - -// Contains check if the string contains the substring. -func Contains(str, substring string) bool { - return strings.Contains(str, substring) -} - -// Matches check if string matches the pattern (pattern is regular expression) -// In case of error return false -func Matches(str, pattern string) bool { - match, _ := regexp.MatchString(pattern, str) - return match -} - -// LeftTrim trim characters from the left-side of the input. -// If second argument is empty, it's will be remove leading spaces. -func LeftTrim(str, chars string) string { - if chars == "" { - return strings.TrimLeftFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("^[" + chars + "]+") - return r.ReplaceAllString(str, "") -} - -// RightTrim trim characters from the right-side of the input. -// If second argument is empty, it's will be remove spaces. -func RightTrim(str, chars string) string { - if chars == "" { - return strings.TrimRightFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("[" + chars + "]+$") - return r.ReplaceAllString(str, "") -} - -// Trim trim characters from both sides of the input. -// If second argument is empty, it's will be remove spaces. -func Trim(str, chars string) string { - return LeftTrim(RightTrim(str, chars), chars) -} - -// WhiteList remove characters that do not appear in the whitelist. 
-func WhiteList(str, chars string) string { - pattern := "[^" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// BlackList remove characters that appear in the blacklist. -func BlackList(str, chars string) string { - pattern := "[" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// StripLow remove characters with a numerical value < 32 and 127, mostly control characters. -// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). -func StripLow(str string, keepNewLines bool) string { - chars := "" - if keepNewLines { - chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" - } else { - chars = "\x00-\x1F\x7F" - } - return BlackList(str, chars) -} - -// ReplacePattern replace regular expression pattern in string -func ReplacePattern(str, pattern, replace string) string { - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, replace) -} - -// Escape replace <, >, & and " with HTML entities. -var Escape = html.EscapeString - -func addSegment(inrune, segment []rune) []rune { - if len(segment) == 0 { - return inrune - } - if len(inrune) != 0 { - inrune = append(inrune, '_') - } - inrune = append(inrune, segment...) - return inrune -} - -// UnderscoreToCamelCase converts from underscore separated form to camel case form. -// Ex.: my_func => MyFunc -func UnderscoreToCamelCase(s string) string { - return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) -} - -// CamelCaseToUnderscore converts from camel case form to underscore separated form. -// Ex.: MyFunc => my_func -func CamelCaseToUnderscore(str string) string { - var output []rune - var segment []rune - for _, r := range str { - - // not treat number as separate segment - if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { - output = addSegment(output, segment) - segment = nil - } - segment = append(segment, unicode.ToLower(r)) - } - output = addSegment(output, segment) - return string(output) -} - -// Reverse return reversed string -func Reverse(s string) string { - r := []rune(s) - for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r) -} - -// GetLines split string by "\n" and return array of lines -func GetLines(s string) []string { - return strings.Split(s, "\n") -} - -// GetLine return specified line of multiline string -func GetLine(s string, index int) (string, error) { - lines := GetLines(s) - if index < 0 || index >= len(lines) { - return "", errors.New("line index out of bounds") - } - return lines[index], nil -} - -// RemoveTags remove all tags from HTML string -func RemoveTags(s string) string { - return ReplacePattern(s, "<[^>]*>", "") -} - -// SafeFileName return safe string that can be used in file names -func SafeFileName(str string) string { - name := strings.ToLower(str) - name = path.Clean(path.Base(name)) - name = strings.Trim(name, " ") - separators, err := regexp.Compile(`[ &_=+:]`) - if err == nil { - name = separators.ReplaceAllString(name, "-") - } - legal, err := regexp.Compile(`[^[:alnum:]-.]`) - if err == nil { - name = legal.ReplaceAllString(name, "") - } - for strings.Contains(name, "--") { - name = strings.Replace(name, "--", "-", -1) - } - return name -} - -// NormalizeEmail canonicalize an email address. 
-// The local part of the email address is lowercased for all domains; the hostname is always lowercased and -// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). -// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and -// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are -// normalized to @gmail.com. -func NormalizeEmail(str string) (string, error) { - if !IsEmail(str) { - return "", fmt.Errorf("%s is not an email", str) - } - parts := strings.Split(str, "@") - parts[0] = strings.ToLower(parts[0]) - parts[1] = strings.ToLower(parts[1]) - if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { - parts[1] = "gmail.com" - parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] - } - return strings.Join(parts, "@"), nil -} - -// Truncate a string to the closest length without breaking words. -func Truncate(str string, length int, ending string) string { - var aftstr, befstr string - if len(str) > length { - words := strings.Fields(str) - before, present := 0, 0 - for i := range words { - befstr = aftstr - before = present - aftstr = aftstr + words[i] + " " - present = len(aftstr) - if present > length && i != 0 { - if (length - before) < (present - length) { - return Trim(befstr, " /\\.,\"'#!?&@+-") + ending - } - return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending - } - } - } - - return str -} - -// PadLeft pad left side of string if size of string is less then indicated pad length -func PadLeft(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, false) -} - -// PadRight pad right side of string if size of string is less then indicated pad length -func PadRight(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, false, true) -} - -// PadBoth pad sides of string if size of string is less then indicated pad length -func PadBoth(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, true) -} - -// PadString either left, right or both sides, not the padding string can be unicode and more then one -// character -func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { - - // When padded length is less then the current string size - if padLen < utf8.RuneCountInString(str) { - return str - } - - padLen -= utf8.RuneCountInString(str) - - targetLen := padLen - - targetLenLeft := targetLen - targetLenRight := targetLen - if padLeft && padRight { - targetLenLeft = padLen / 2 - targetLenRight = padLen - targetLenLeft - } - - strToRepeatLen := utf8.RuneCountInString(padStr) - - repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) - repeatedString := strings.Repeat(padStr, repeatTimes) - - leftSide := "" - if padLeft { - leftSide = repeatedString[0:targetLenLeft] - } - - rightSide := "" - if padRight { - rightSide = repeatedString[0:targetLenRight] - } - - return leftSide + str + rightSide -} - -// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object -func TruncatingErrorf(str string, args ...interface{}) error { - n := strings.Count(str, "%s") - return fmt.Errorf(str, args[:n]...) 
-} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go deleted file mode 100644 index b18bbcb4..00000000 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ /dev/null @@ -1,1278 +0,0 @@ -// Package govalidator is package of validators and sanitizers for strings, structs and collections. -package govalidator - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net" - "net/url" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -var ( - fieldsRequiredByDefault bool - nilPtrAllowedByRequired = false - notNumberRegexp = regexp.MustCompile("[^0-9]+") - whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) - paramsRegexp = regexp.MustCompile(`\(.*\)$`) -) - -const maxURLRuneCount = 2083 -const minURLRuneCount = 3 -const RF3339WithoutZone = "2006-01-02T15:04:05" - -// SetFieldsRequiredByDefault causes validation to fail when struct fields -// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). -// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -// type exampleStruct struct { -// Name string `` -// Email string `valid:"email"` -// This, however, will only fail when Email is empty or an invalid email address: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email"` -// Lastly, this will only fail when Email is an invalid email address but not when it's empty: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email,optional"` -func SetFieldsRequiredByDefault(value bool) { - fieldsRequiredByDefault = value -} - -// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. -// The validation will still reject ptr fields in their zero value state. Example with this enabled: -// type exampleStruct struct { -// Name *string `valid:"required"` -// With `Name` set to "", this will be considered invalid input and will cause a validation error. -// With `Name` set to nil, this will be considered valid by validation. -// By default this is disabled. -func SetNilPtrAllowedByRequired(value bool) { - nilPtrAllowedByRequired = value -} - -// IsEmail check if the string is an email. -func IsEmail(str string) bool { - // TODO uppercase letters are not supported - return rxEmail.MatchString(str) -} - -// IsExistingEmail check if the string is an email of existing domain -func IsExistingEmail(email string) bool { - - if len(email) < 6 || len(email) > 254 { - return false - } - at := strings.LastIndex(email, "@") - if at <= 0 || at > len(email)-3 { - return false - } - user := email[:at] - host := email[at+1:] - if len(user) > 64 { - return false - } - if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { - return false - } - switch host { - case "localhost", "example.com": - return true - } - if _, err := net.LookupMX(host); err != nil { - if _, err := net.LookupIP(host); err != nil { - return false - } - } - - return true -} - -// IsURL check if the string is an URL. 
-func IsURL(str string) bool { - if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { - return false - } - strTemp := str - if strings.Contains(str, ":") && !strings.Contains(str, "://") { - // support no indicated urlscheme but with colon for port number - // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString - strTemp = "http://" + str - } - u, err := url.Parse(strTemp) - if err != nil { - return false - } - if strings.HasPrefix(u.Host, ".") { - return false - } - if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { - return false - } - return rxURL.MatchString(str) -} - -// IsRequestURL check if the string rawurl, assuming -// it was received in an HTTP request, is a valid -// URL confirm to RFC 3986 -func IsRequestURL(rawurl string) bool { - url, err := url.ParseRequestURI(rawurl) - if err != nil { - return false //Couldn't even parse the rawurl - } - if len(url.Scheme) == 0 { - return false //No Scheme found - } - return true -} - -// IsRequestURI check if the string rawurl, assuming -// it was received in an HTTP request, is an -// absolute URI or an absolute path. -func IsRequestURI(rawurl string) bool { - _, err := url.ParseRequestURI(rawurl) - return err == nil -} - -// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid. -func IsAlpha(str string) bool { - if IsNull(str) { - return true - } - return rxAlpha.MatchString(str) -} - -//IsUTFLetter check if the string contains only unicode letter characters. -//Similar to IsAlpha but for all languages. Empty string is valid. -func IsUTFLetter(str string) bool { - if IsNull(str) { - return true - } - - for _, c := range str { - if !unicode.IsLetter(c) { - return false - } - } - return true - -} - -// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid. -func IsAlphanumeric(str string) bool { - if IsNull(str) { - return true - } - return rxAlphanumeric.MatchString(str) -} - -// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid. -func IsUTFLetterNumeric(str string) bool { - if IsNull(str) { - return true - } - for _, c := range str { - if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok - return false - } - } - return true - -} - -// IsNumeric check if the string contains only numbers. Empty string is valid. -func IsNumeric(str string) bool { - if IsNull(str) { - return true - } - return rxNumeric.MatchString(str) -} - -// IsUTFNumeric check if the string contains only unicode numbers of any kind. -// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. -func IsUTFNumeric(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsNumber(c) { //numbers && minus sign are ok - return false - } - } - return true - -} - -// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid. 
-func IsUTFDigit(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsDigit(c) { //digits && minus sign are ok - return false - } - } - return true - -} - -// IsHexadecimal check if the string is a hexadecimal number. -func IsHexadecimal(str string) bool { - return rxHexadecimal.MatchString(str) -} - -// IsHexcolor check if the string is a hexadecimal color. -func IsHexcolor(str string) bool { - return rxHexcolor.MatchString(str) -} - -// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB). -func IsRGBcolor(str string) bool { - return rxRGBcolor.MatchString(str) -} - -// IsLowerCase check if the string is lowercase. Empty string is valid. -func IsLowerCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToLower(str) -} - -// IsUpperCase check if the string is uppercase. Empty string is valid. -func IsUpperCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToUpper(str) -} - -// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid. -func HasLowerCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasLowerCase.MatchString(str) -} - -// HasUpperCase check if the string contians as least 1 uppercase. Empty string is valid. -func HasUpperCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasUpperCase.MatchString(str) -} - -// IsInt check if the string is an integer. Empty string is valid. -func IsInt(str string) bool { - if IsNull(str) { - return true - } - return rxInt.MatchString(str) -} - -// IsFloat check if the string is a float. -func IsFloat(str string) bool { - return str != "" && rxFloat.MatchString(str) -} - -// IsDivisibleBy check if the string is a number that's divisible by another. -// If second argument is not valid integer or zero, it's return false. -// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). -func IsDivisibleBy(str, num string) bool { - f, _ := ToFloat(str) - p := int64(f) - q, _ := ToInt(num) - if q == 0 { - return false - } - return (p == 0) || (p%q == 0) -} - -// IsNull check if the string is null. -func IsNull(str string) bool { - return len(str) == 0 -} - -// HasWhitespaceOnly checks the string only contains whitespace -func HasWhitespaceOnly(str string) bool { - return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) -} - -// HasWhitespace checks if the string contains any whitespace -func HasWhitespace(str string) bool { - return len(str) > 0 && rxHasWhitespace.MatchString(str) -} - -// IsByteLength check if the string's length (in bytes) falls in a range. -func IsByteLength(str string, min, max int) bool { - return len(str) >= min && len(str) <= max -} - -// IsUUIDv3 check if the string is a UUID version 3. -func IsUUIDv3(str string) bool { - return rxUUID3.MatchString(str) -} - -// IsUUIDv4 check if the string is a UUID version 4. -func IsUUIDv4(str string) bool { - return rxUUID4.MatchString(str) -} - -// IsUUIDv5 check if the string is a UUID version 5. -func IsUUIDv5(str string) bool { - return rxUUID5.MatchString(str) -} - -// IsUUID check if the string is a UUID (version 3, 4 or 5). -func IsUUID(str string) bool { - return rxUUID.MatchString(str) -} - -// IsCreditCard check if the string is a credit card. 
-func IsCreditCard(str string) bool { - sanitized := notNumberRegexp.ReplaceAllString(str, "") - if !rxCreditCard.MatchString(sanitized) { - return false - } - var sum int64 - var digit string - var tmpNum int64 - var shouldDouble bool - for i := len(sanitized) - 1; i >= 0; i-- { - digit = sanitized[i:(i + 1)] - tmpNum, _ = ToInt(digit) - if shouldDouble { - tmpNum *= 2 - if tmpNum >= 10 { - sum += ((tmpNum % 10) + 1) - } else { - sum += tmpNum - } - } else { - sum += tmpNum - } - shouldDouble = !shouldDouble - } - - return sum%10 == 0 -} - -// IsISBN10 check if the string is an ISBN version 10. -func IsISBN10(str string) bool { - return IsISBN(str, 10) -} - -// IsISBN13 check if the string is an ISBN version 13. -func IsISBN13(str string) bool { - return IsISBN(str, 13) -} - -// IsISBN check if the string is an ISBN (version 10 or 13). -// If version value is not equal to 10 or 13, it will be check both variants. -func IsISBN(str string, version int) bool { - sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") - var checksum int32 - var i int32 - if version == 10 { - if !rxISBN10.MatchString(sanitized) { - return false - } - for i = 0; i < 9; i++ { - checksum += (i + 1) * int32(sanitized[i]-'0') - } - if sanitized[9] == 'X' { - checksum += 10 * 10 - } else { - checksum += 10 * int32(sanitized[9]-'0') - } - if checksum%11 == 0 { - return true - } - return false - } else if version == 13 { - if !rxISBN13.MatchString(sanitized) { - return false - } - factor := []int32{1, 3} - for i = 0; i < 12; i++ { - checksum += factor[i%2] * int32(sanitized[i]-'0') - } - return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 - } - return IsISBN(str, 10) || IsISBN(str, 13) -} - -// IsJSON check if the string is valid JSON (note: uses json.Unmarshal). -func IsJSON(str string) bool { - var js json.RawMessage - return json.Unmarshal([]byte(str), &js) == nil -} - -// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid. -func IsMultibyte(str string) bool { - if IsNull(str) { - return true - } - return rxMultibyte.MatchString(str) -} - -// IsASCII check if the string contains ASCII chars only. Empty string is valid. -func IsASCII(str string) bool { - if IsNull(str) { - return true - } - return rxASCII.MatchString(str) -} - -// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid. -func IsPrintableASCII(str string) bool { - if IsNull(str) { - return true - } - return rxPrintableASCII.MatchString(str) -} - -// IsFullWidth check if the string contains any full-width chars. Empty string is valid. -func IsFullWidth(str string) bool { - if IsNull(str) { - return true - } - return rxFullWidth.MatchString(str) -} - -// IsHalfWidth check if the string contains any half-width chars. Empty string is valid. -func IsHalfWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) -} - -// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid. -func IsVariableWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) -} - -// IsBase64 check if a string is base64 encoded. -func IsBase64(str string) bool { - return rxBase64.MatchString(str) -} - -// IsFilePath check is a string is Win or Unix file path and returns it's type. 
-func IsFilePath(str string) (bool, int) { - if rxWinPath.MatchString(str) { - //check windows path limit see: - // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath - if len(str[3:]) > 32767 { - return false, Win - } - return true, Win - } else if rxUnixPath.MatchString(str) { - return true, Unix - } - return false, Unknown -} - -// IsDataURI checks if a string is base64 encoded data URI such as an image -func IsDataURI(str string) bool { - dataURI := strings.Split(str, ",") - if !rxDataURI.MatchString(dataURI[0]) { - return false - } - return IsBase64(dataURI[1]) -} - -// IsISO3166Alpha2 checks if a string is valid two-letter country code -func IsISO3166Alpha2(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO3166Alpha3 checks if a string is valid three-letter country code -func IsISO3166Alpha3(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha3Code { - return true - } - } - return false -} - -// IsISO693Alpha2 checks if a string is valid two-letter language code -func IsISO693Alpha2(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO693Alpha3b checks if a string is valid three-letter language code -func IsISO693Alpha3b(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha3bCode { - return true - } - } - return false -} - -// IsDNSName will validate the given string as a DNS name -func IsDNSName(str string) bool { - if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { - // constraints already violated - return false - } - return !IsIP(str) && rxDNSName.MatchString(str) -} - -// IsHash checks if a string is a hash of type algorithm. -// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] -func IsHash(str string, algorithm string) bool { - len := "0" - algo := strings.ToLower(algorithm) - - if algo == "crc32" || algo == "crc32b" { - len = "8" - } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { - len = "32" - } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { - len = "40" - } else if algo == "tiger192" { - len = "48" - } else if algo == "sha256" { - len = "64" - } else if algo == "sha384" { - len = "96" - } else if algo == "sha512" { - len = "128" - } else { - return false - } - - return Matches(str, "^[a-f0-9]{"+len+"}$") -} - -// IsDialString validates the given string for usage with the various Dial() functions -func IsDialString(str string) bool { - - if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { - return true - } - - return false -} - -// IsIP checks if a string is either IP version 4 or 6. -func IsIP(str string) bool { - return net.ParseIP(str) != nil -} - -// IsPort checks if a string represents a valid port -func IsPort(str string) bool { - if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { - return true - } - return false -} - -// IsIPv4 check if the string is an IP version 4. -func IsIPv4(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ".") -} - -// IsIPv6 check if the string is an IP version 6. 
-func IsIPv6(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ":") -} - -// IsCIDR check if the string is an valid CIDR notiation (IPV4 & IPV6) -func IsCIDR(str string) bool { - _, _, err := net.ParseCIDR(str) - return err == nil -} - -// IsMAC check if a string is valid MAC address. -// Possible MAC formats: -// 01:23:45:67:89:ab -// 01:23:45:67:89:ab:cd:ef -// 01-23-45-67-89-ab -// 01-23-45-67-89-ab-cd-ef -// 0123.4567.89ab -// 0123.4567.89ab.cdef -func IsMAC(str string) bool { - _, err := net.ParseMAC(str) - return err == nil -} - -// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name -func IsHost(str string) bool { - return IsIP(str) || IsDNSName(str) -} - -// IsMongoID check if the string is a valid hex-encoded representation of a MongoDB ObjectId. -func IsMongoID(str string) bool { - return rxHexadecimal.MatchString(str) && (len(str) == 24) -} - -// IsLatitude check if a string is valid latitude. -func IsLatitude(str string) bool { - return rxLatitude.MatchString(str) -} - -// IsLongitude check if a string is valid longitude. -func IsLongitude(str string) bool { - return rxLongitude.MatchString(str) -} - -// IsRsaPublicKey check if a string is valid public key with provided length -func IsRsaPublicKey(str string, keylen int) bool { - bb := bytes.NewBufferString(str) - pemBytes, err := ioutil.ReadAll(bb) - if err != nil { - return false - } - block, _ := pem.Decode(pemBytes) - if block != nil && block.Type != "PUBLIC KEY" { - return false - } - var der []byte - - if block != nil { - der = block.Bytes - } else { - der, err = base64.StdEncoding.DecodeString(str) - if err != nil { - return false - } - } - - key, err := x509.ParsePKIXPublicKey(der) - if err != nil { - return false - } - pubkey, ok := key.(*rsa.PublicKey) - if !ok { - return false - } - bitlen := len(pubkey.N.Bytes()) * 8 - return bitlen == int(keylen) -} - -func toJSONName(tag string) string { - if tag == "" { - return "" - } - - // JSON name always comes first. If there's no options then split[0] is - // JSON name, if JSON name is not set, then split[0] is an empty string. - split := strings.SplitN(tag, ",", 2) - - name := split[0] - - // However it is possible that the field is skipped when - // (de-)serializing from/to JSON, in which case assume that there is no - // tag name to use - if name == "-" { - return "" - } - return name -} - -func PrependPathToErrors(err error, path string) error { - switch err2 := err.(type) { - case Error: - err2.Path = append([]string{path}, err2.Path...) - return err2 - case Errors: - errors := err2.Errors() - for i, err3 := range errors { - errors[i] = PrependPathToErrors(err3, path) - } - return err2 - } - fmt.Println(err) - return err -} - -// ValidateStruct use tags for fields. -// result will be equal to `false` if there are any errors. 
-func ValidateStruct(s interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - val := reflect.ValueOf(s) - if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { - val = val.Elem() - } - // we only accept structs - if val.Kind() != reflect.Struct { - return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) - } - var errs Errors - for i := 0; i < val.NumField(); i++ { - valueField := val.Field(i) - typeField := val.Type().Field(i) - if typeField.PkgPath != "" { - continue // Private field - } - structResult := true - if valueField.Kind() == reflect.Interface { - valueField = valueField.Elem() - } - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - typeField.Tag.Get(tagName) != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = PrependPathToErrors(err, typeField.Name) - errs = append(errs, err) - } - } - resultField, err2 := typeCheck(valueField, typeField, val, nil) - if err2 != nil { - - // Replace structure name with JSON name if there is a tag on the variable - jsonTag := toJSONName(typeField.Tag.Get("json")) - if jsonTag != "" { - switch jsonError := err2.(type) { - case Error: - jsonError.Name = jsonTag - err2 = jsonError - case Errors: - for i2, err3 := range jsonError { - switch customErr := err3.(type) { - case Error: - customErr.Name = jsonTag - jsonError[i2] = customErr - } - } - - err2 = jsonError - } - } - - errs = append(errs, err2) - } - result = result && resultField && structResult - } - if len(errs) > 0 { - err = errs - } - return result, err -} - -// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} -func parseTagIntoMap(tag string) tagOptionsMap { - optionsMap := make(tagOptionsMap) - options := strings.Split(tag, ",") - - for i, option := range options { - option = strings.TrimSpace(option) - - validationOptions := strings.Split(option, "~") - if !isValidTag(validationOptions[0]) { - continue - } - if len(validationOptions) == 2 { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} - } else { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} - } - } - return optionsMap -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -// IsSSN will validate the given string as a U.S. 
Social Security Number -func IsSSN(str string) bool { - if str == "" || len(str) != 11 { - return false - } - return rxSSN.MatchString(str) -} - -// IsSemver check if string is valid semantic version -func IsSemver(str string) bool { - return rxSemver.MatchString(str) -} - -// IsTime check if string is valid according to given format -func IsTime(str string, format string) bool { - _, err := time.Parse(format, str) - return err == nil -} - -// IsRFC3339 check if string is valid timestamp value according to RFC3339 -func IsRFC3339(str string) bool { - return IsTime(str, time.RFC3339) -} - -// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone. -func IsRFC3339WithoutZone(str string) bool { - return IsTime(str, RF3339WithoutZone) -} - -// IsISO4217 check if string is valid ISO currency code -func IsISO4217(str string) bool { - for _, currency := range ISO4217List { - if str == currency { - return true - } - } - - return false -} - -// ByteLength check string's length -func ByteLength(str string, params ...string) bool { - if len(params) == 2 { - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return len(str) >= int(min) && len(str) <= int(max) - } - - return false -} - -// RuneLength check string's length -// Alias for StringLength -func RuneLength(str string, params ...string) bool { - return StringLength(str, params...) -} - -// IsRsaPub check whether string is valid RSA key -// Alias for IsRsaPublicKey -func IsRsaPub(str string, params ...string) bool { - if len(params) == 1 { - len, _ := ToInt(params[0]) - return IsRsaPublicKey(str, int(len)) - } - - return false -} - -// StringMatches checks if a string matches a given pattern. -func StringMatches(s string, params ...string) bool { - if len(params) == 1 { - pattern := params[0] - return Matches(s, pattern) - } - return false -} - -// StringLength check string's length (including multi byte strings) -func StringLength(str string, params ...string) bool { - - if len(params) == 2 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return strLength >= int(min) && strLength <= int(max) - } - - return false -} - -// Range check string's length -func Range(str string, params ...string) bool { - if len(params) == 2 { - value, _ := ToFloat(str) - min, _ := ToFloat(params[0]) - max, _ := ToFloat(params[1]) - return InRange(value, min, max) - } - - return false -} - -func isInRaw(str string, params ...string) bool { - if len(params) == 1 { - rawParams := params[0] - - parsedParams := strings.Split(rawParams, "|") - - return IsIn(str, parsedParams...) 
- } - - return false -} - -// IsIn check if string str is a member of the set of strings params -func IsIn(str string, params ...string) bool { - for _, param := range params { - if str == param { - return true - } - } - - return false -} - -func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { - if nilPtrAllowedByRequired { - k := v.Kind() - if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { - return true, nil - } - } - - if requiredOption, isRequired := options["required"]; isRequired { - if len(requiredOption.customErrorMessage) > 0 { - return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} - } - return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} - } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { - return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} - } - // not required and empty is valid - return true, nil -} - -func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { - if !v.IsValid() { - return false, nil - } - - tag := t.Tag.Get(tagName) - - // Check if the field should be ignored - switch tag { - case "": - if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { - if !fieldsRequiredByDefault { - return true, nil - } - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} - } - case "-": - return true, nil - } - - isRootType := false - if options == nil { - isRootType = true - options = parseTagIntoMap(tag) - } - - if isEmptyValue(v) { - // an empty value is not validated, check only required - isValid, resultErr = checkRequired(v, t, options) - for key := range options { - delete(options, key) - } - return isValid, resultErr - } - - var customTypeErrors Errors - optionsOrder := options.orderedKeys() - for _, validatorName := range optionsOrder { - validatorStruct := options[validatorName] - if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { - delete(options, validatorName) - - if result := validatefunc(v.Interface(), o.Interface()); !result { - if len(validatorStruct.customErrorMessage) > 0 { - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) - continue - } - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) - } - } - } - - if len(customTypeErrors.Errors()) > 0 { - return false, customTypeErrors - } - - if isRootType { - // Ensure that we've checked the value by all specified validators before report that the value is valid - defer func() { - delete(options, "optional") - delete(options, "required") - - if isValid && resultErr == nil && len(options) != 0 { - optionsOrder := options.orderedKeys() - for _, validator := range optionsOrder { - isValid = false - resultErr = Error{t.Name, fmt.Errorf( - "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} - return - } - } - }() - } - - switch v.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64, - reflect.String: - // for each tag option check the map of validator functions - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // Check whether the tag looks like '!something' or 'something' - if validator[0] == '!' { - validator = validator[1:] - negate = true - } - - // Check for param validators - for key, value := range ParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := ParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - // type not yet supported, fail - return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} - } - } - - if validatefunc, ok := TagMap[validator]; ok { - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field); !result && !negate || result && negate { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - //Not Yet Supported Types (Fail here!) 
- err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) - return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} - } - } - } - return true, nil - case reflect.Map: - if v.Type().Key().Kind() != reflect.String { - return false, &UnsupportedTypeError{v.Type()} - } - var sv stringValues - sv = v.MapKeys() - sort.Sort(sv) - result := true - for i, k := range sv { - var resultItem bool - var err error - if v.MapIndex(k).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.MapIndex(k), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) - if err != nil { - err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Slice, reflect.Array: - result := true - for i := 0; i < v.Len(); i++ { - var resultItem bool - var err error - if v.Index(i).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.Index(i), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.Index(i).Interface()) - if err != nil { - err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Interface: - // If the value is an interface then encode its element - if v.IsNil() { - return true, nil - } - return ValidateStruct(v.Interface()) - case reflect.Ptr: - // If the value is a pointer then check its element - if v.IsNil() { - return true, nil - } - return typeCheck(v.Elem(), t, o, options) - case reflect.Struct: - return ValidateStruct(v.Interface()) - default: - return false, &UnsupportedTypeError{v.Type()} - } -} - -func stripParams(validatorString string) string { - return paramsRegexp.ReplaceAllString(validatorString, "") -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.String, reflect.Array: - return v.Len() == 0 - case reflect.Map, reflect.Slice: - return v.Len() == 0 || v.IsNil() - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) -} - -// ErrorByField returns error for specified field of the struct -// validated by ValidateStruct or empty string if there are no errors -// or this field doesn't exists or doesn't have any errors. -func ErrorByField(e error, field string) string { - if e == nil { - return "" - } - return ErrorsByField(e)[field] -} - -// ErrorsByField returns map of errors of the struct validated -// by ValidateStruct or empty map if there are no errors. 
-func ErrorsByField(e error) map[string]string { - m := make(map[string]string) - if e == nil { - return m - } - // prototype for ValidateStruct - - switch e.(type) { - case Error: - m[e.(Error).Name] = e.(Error).Err.Error() - case Errors: - for _, item := range e.(Errors).Errors() { - n := ErrorsByField(item) - for k, v := range n { - m[k] = v - } - } - } - - return m -} - -// Error returns string equivalent for reflect.Type -func (e *UnsupportedTypeError) Error() string { - return "validator: unsupported type: " + e.Type.String() -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } -func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml deleted file mode 100644 index cac7a5fc..00000000 --- a/vendor/github.com/asaskevich/govalidator/wercker.yml +++ /dev/null @@ -1,15 +0,0 @@ -box: golang -build: - steps: - - setup-go-workspace - - - script: - name: go get - code: | - go version - go get -t ./... - - - script: - name: go test - code: | - go test -race ./... diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt deleted file mode 100644 index 5f14d116..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/error.go deleted file mode 100644 index 56fdfc2b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/error.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. 
-// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. 
-// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/types.go deleted file mode 100644 index 0202a008..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/types.go +++ /dev/null @@ -1,194 +0,0 @@ -package awserr - -import "fmt" - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. 
-func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. 
-func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/chain_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/chain_provider.go deleted file mode 100644 index c78dfac6..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/chain_provider.go +++ /dev/null @@ -1,75 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Credentials, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a CredentialsProvider is found which returns valid credentials Credentials ChainProvider -// will cache that CredentialsProvider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next CredentialsProvider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := aws.NewChainCredentials( -// []aws.CredentialsProvider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(cfg), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// cfg := cfg.Copy() -// cfg.Credentials = creds -// svc := ec2.New(cfg) -// -type ChainProvider struct { - SafeCredentialsProvider - - Providers []CredentialsProvider -} - -// NewChainProvider returns a pointer to a new ChainProvider value wrapping -// a chain of credentials providers. -func NewChainProvider(providers []CredentialsProvider) *ChainProvider { - p := &ChainProvider{ - Providers: append([]CredentialsProvider{}, providers...), - } - p.RetrieveFn = p.retrieveFn - - return p -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. -// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. 
-func (c *ChainProvider) retrieveFn() (Credentials, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - return creds, nil - } - errs = append(errs, err) - } - - return Credentials{}, - awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/client.go deleted file mode 100644 index f5b63bde..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/client.go +++ /dev/null @@ -1,85 +0,0 @@ -package aws - -import ( - "net/http" -) - -// Metadata wraps immutable data from the Client structure. -type Metadata struct { - ServiceName string - APIVersion string - - Endpoint string - SigningName string - SigningRegion string - - JSONVersion string - TargetPrefix string -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - Metadata Metadata - - Config Config - - Region string - Credentials CredentialsProvider - EndpointResolver EndpointResolver - Handlers Handlers - Retryer Retryer - - // TODO replace with value not pointer - LogLevel LogLevel - Logger Logger - - HTTPClient *http.Client -} - -// NewClient will return a pointer to a new initialized service client. -func NewClient(cfg Config, metadata Metadata) *Client { - svc := &Client{ - Metadata: metadata, - - // TODO remove config when request reqfactored - Config: cfg, - - Region: cfg.Region, - Credentials: cfg.Credentials, - EndpointResolver: cfg.EndpointResolver, - Handlers: cfg.Handlers.Copy(), - Retryer: cfg.Retryer, - - LogLevel: cfg.LogLevel, - Logger: cfg.Logger, - } - - retryer := cfg.Retryer - if retryer == nil { - // TODO need better way of specifing default num retries - retryer = DefaultRetryer{NumMaxRetries: 3} - } - svc.Retryer = retryer - - svc.AddDebugHandlers() - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *Operation, params interface{}, data interface{}) *Request { - return New(c.Config, c.Metadata, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(LogDebug) { - return - } - - c.Handlers.Send.PushFrontNamed(NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) - c.Handlers.Send.PushBackNamed(NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/client_logger.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/client_logger.go deleted file mode 100644 index f3e2e86a..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/client_logger.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. 
- Logger Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -func logRequest(r *Request) { - logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.Metadata.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.Metadata.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logResponse(r *Request) { - lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, - } - - handlerFn := func(req *Request) { - body, err := httputil.DumpResponse(req.HTTPResponse, false) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.Metadata.ServiceName, req.Operation.Name, err)) - return - } - - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.Metadata.ServiceName, req.Operation.Name, err)) - return - } - lw.Logger.Log(fmt.Sprintf(logRespMsg, req.Metadata.ServiceName, req.Operation.Name, string(body))) - if req.Config.LogLevel.Matches(LogDebugWithHTTPBody) { - lw.Logger.Log(string(b)) - } - } - - const handlerName = "awsdk.client.LogResponse.ResponseBody" - - r.Handlers.Unmarshal.SetBackNamed(NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) - r.Handlers.UnmarshalError.SetBackNamed(NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go deleted file mode 100644 index 501e8ce9..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "net/http" -) - -// A Config provides service configuration for service clients. -type Config struct { - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for - // information on AWS regions. - Region string - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. 
- Credentials CredentialsProvider - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. - EndpointResolver EndpointResolver - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // TODO document - Handlers Handlers - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the client.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - Retryer Retryer - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel LogLevel - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not if request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - // - // TODO this config field is depercated and needs removed. - EnforceShouldRetryCheck bool - - // DisableRestProtocolURICleaning will not clean the URL path when making - // rest protocol requests. Will default to false. This would only be used - // for empty directory names in s3 requests. - // - // Example: - // cfg, err := external.LoadDefaultAWSConfig() - // cfg.DisableRestProtocolURICleaning = true - // - // svc := s3.New(cfg) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - // - // TODO need better way of representing support for this concept. Not on Config. - DisableRestProtocolURICleaning bool - - // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing - // request endpoint hosts with modeled information. - // - // Disabling this feature is useful when you want to use local endpoints - // for testing that do not support the modeled host prefix pattern. - DisableEndpointHostPrefix bool -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -func NewConfig() *Config { - return &Config{} -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. 
-func (c Config) Copy() Config { - cp := c - cp.Handlers = cp.Handlers.Copy() - - return cp -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error.go deleted file mode 100644 index 5fe8be63..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !appengine,!plan9 - -package aws - -import ( - "net" - "os" - "syscall" -) - -func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error_other.go deleted file mode 100644 index ca8422ef..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package aws - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go deleted file mode 100644 index 79f42685..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go +++ /dev/null @@ -1,71 +0,0 @@ -package aws - -import ( - "time" -) - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. 
-func BackgroundContext() Context { - return backgroundCtx -} - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_6.go deleted file mode 100644 index 8fdda530..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_6.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !go1.7 - -package aws - -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_7.go deleted file mode 100644 index 064f75c9..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_7.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -var ( - backgroundCtx = context.Background() -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/convert_types.go deleted file mode 100644 index ff5d58e0..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/convert_types.go +++ /dev/null @@ -1,387 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. 
-func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. 
-func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. 
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// SecondsTimeValue converts an int64 pointer to a time.Time value -// representing seconds since Epoch or time.Time{} if the pointer is nil. -func SecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix((*v / 1000), 0) - } - return time.Time{} -} - -// MillisecondsTimeValue converts an int64 pointer to a time.Time value -// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. -func MillisecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix(0, (*v * 1000000)) - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
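Editor's note: the `TimeUnixMilli` helper documented just above converts a `time.Time` to milliseconds since the Unix epoch by dividing `UnixNano`. A standalone sketch of that arithmetic; since Go 1.17 the standard library also provides `Time.UnixMilli`, shown for comparison:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)

	// Same arithmetic as the deleted TimeUnixMilli helper:
	// nanoseconds divided by the nanoseconds in one millisecond.
	ms := t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
	fmt.Println(ms) // 1577836800000

	// Equivalent standard-library call (Go 1.17+).
	fmt.Println(t.UnixMilli()) // 1577836800000
}
```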
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go deleted file mode 100644 index db93f59d..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ /dev/null @@ -1,138 +0,0 @@ -package aws - -import ( - "math" - "sync" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// NeverExpire is the time identifier used when a credential provider's -// credentials will not expire. This is used in cases where a non-expiring -// provider type cannot be used. -var NeverExpire = time.Unix(math.MaxInt64, 0) - -// AnonymousCredentials is an empty CredentialProvider that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// s3Cfg := cfg.Copy() -// s3cfg.Credentials = AnonymousCredentials -// -// svc := s3.New(s3Cfg) -var AnonymousCredentials = StaticCredentialsProvider{ - Value: Credentials{Source: "AnonymousCredentials"}, -} - -// An Expiration provides wrapper around time with expiration related methods. -type Expiration time.Time - -// Expired returns if the time has expired. - -// A Credentials is the AWS credentials value for individual credential fields. -type Credentials struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Source of the credentials - Source string - - // Time the credentials will expire. - CanExpire bool - Expires time.Time -} - -// Expired returns if the credetials have expired. -func (v Credentials) Expired() bool { - if v.CanExpire { - return !v.Expires.After(sdk.NowTime()) - } - - return false -} - -// HasKeys returns if the credentials keys are set. -func (v Credentials) HasKeys() bool { - return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0 -} - -// A CredentialsProvider is the interface for any component which will provide credentials -// Credentials. A CredentialsProvider is required to manage its own Expired state, and what to -// be expired means. 
-// -// The CredentialsProvider should not need to implement its own mutexes, because -// that will be managed by CredentialsLoader. -type CredentialsProvider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Credentials, error) - - // TODO should Retrieve take a context? -} - -// SafeCredentialsProvider provides caching and concurrency safe credentials -// retrieval via the RetrieveFn. -type SafeCredentialsProvider struct { - RetrieveFn func() (Credentials, error) - - creds atomic.Value - m sync.Mutex -} - -// Retrieve returns the credentials. If the credentials have already been -// retrieved, and not expired the cached credentials will be returned. If the -// credentails have not been retrieved yet, or expired RetrieveFn will be called. -// -// Retruns and error if RetrieveFn returns an error. -func (p *SafeCredentialsProvider) Retrieve() (Credentials, error) { - if creds := p.getCreds(); creds != nil { - return *creds, nil - } - - p.m.Lock() - defer p.m.Unlock() - - // Make sure another goroutine didn't already update the credentials. - if creds := p.getCreds(); creds != nil { - return *creds, nil - } - - creds, err := p.RetrieveFn() - if err != nil { - return Credentials{}, err - } - p.creds.Store(&creds) - - return creds, nil -} - -func (p *SafeCredentialsProvider) getCreds() *Credentials { - v := p.creds.Load() - if v == nil { - return nil - } - - c := v.(*Credentials) - if c != nil && c.HasKeys() && !c.Expired() { - return c - } - - return nil -} - -// Invalidate will invalidate the cached credentials. The next call to Retrieve -// will cause RetrieveFn to be called. -func (p *SafeCredentialsProvider) Invalidate() { - p.creds.Store((*Credentials)(nil)) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/default_retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/default_retryer.go deleted file mode 100644 index a08af806..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/default_retryer.go +++ /dev/null @@ -1,136 +0,0 @@ -package aws - -import ( - "math/rand" - "strconv" - "sync" - "time" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, implement the -// Retryer interface or create a structure type that composes this -// struct and override the specific methods. 
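Editor's note: `SafeCredentialsProvider` above caches the result of `RetrieveFn` and guards refreshes with a mutex so only one goroutine fetches at a time. A condensed, self-contained sketch of that caching pattern, simplified to a single mutex (no lock-free fast path via `atomic.Value` and no expiry check; all names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type credentials struct {
	AccessKeyID     string
	SecretAccessKey string
}

// cachingProvider calls retrieve once and serves the cached value afterwards.
type cachingProvider struct {
	retrieve func() (credentials, error)

	mu    sync.Mutex
	creds *credentials
}

func (p *cachingProvider) Retrieve() (credentials, error) {
	p.mu.Lock()
	defer p.mu.Unlock()

	// If a previous call already fetched credentials, reuse them.
	if p.creds != nil {
		return *p.creds, nil
	}

	creds, err := p.retrieve()
	if err != nil {
		return credentials{}, err
	}
	p.creds = &creds
	return creds, nil
}

func main() {
	calls := 0
	p := &cachingProvider{retrieve: func() (credentials, error) {
		calls++
		return credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}, nil
	}}

	p.Retrieve()
	c, _ := p.Retrieve()
	fmt.Println(c.AccessKeyID, "fetched", calls, "time(s)") // AKID fetched 1 time(s)
}
```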
For example, to override only -// the MaxRetries method: -// -// type retryer struct { -// client.DefaultRetryer -// } -// -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } -type DefaultRetryer struct { - NumMaxRetries int -} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API -func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *Request) time.Duration { - // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { - if delay, ok := getRetryDelay(r); ok { - return delay - } - - minTime = 500 - } - - retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { - retryCount = 8 - } - - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) - return time.Duration(delay) * time.Millisecond -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *Request) bool { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - - if r.HTTPResponse.StatusCode >= 500 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - - return true -} - -// This will look in the Retry-After header, RFC 7231, for how long -// it will wait before attempting another request -func getRetryDelay(r *Request) (time.Duration, bool) { - if !canUseRetryAfterHeader(r) { - return 0, false - } - - delayStr := r.HTTPResponse.Header.Get("Retry-After") - if len(delayStr) == 0 { - return 0, false - } - - delay, err := strconv.Atoi(delayStr) - if err != nil { - return 0, false - } - - return time.Duration(delay) * time.Second, true -} - -// Will look at the status code to see if the retry header pertains to -// the status code. -func canUseRetryAfterHeader(r *Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 503: - default: - return false - } - - return true -} - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go deleted file mode 100644 index fb92d42a..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. -// -// Generally this package shouldn't be used directly, but external.Config -// instead. This package is useful when you need to reset the defaults -// of a service client to the SDK defaults before setting -// additional parameters. 
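Editor's note: `RetryRules` above computes a delay of `(2^retryCount) * (rand(minTime) + minTime)` milliseconds, with the retry count capped and a larger base when the request was throttled. A standalone sketch of that calculation, with the constants copied from the code above and the seeding simplified:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay mirrors the arithmetic in DefaultRetryer.RetryRules: an
// exponential factor of the retry count times a jittered base in milliseconds.
func backoffDelay(retryCount int, throttled bool) time.Duration {
	minTime := 30
	if throttled {
		minTime = 500
	}

	// Cap the exponent so the delay stays near the ~five minute ceiling.
	if retryCount > 13 {
		retryCount = 13
	} else if throttled && retryCount > 8 {
		retryCount = 8
	}

	delay := (1 << uint(retryCount)) * (rand.Intn(minTime) + minTime)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	for attempt := 0; attempt < 4; attempt++ {
		fmt.Printf("attempt %d: wait %v\n", attempt, backoffDelay(attempt, false))
	}
}
```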
-package defaults - -import ( - "log" - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/endpoints" -) - -// Logger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func Logger() aws.Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client. -func Config() aws.Config { - return aws.Config{ - EndpointResolver: endpoints.NewDefaultResolver(), - Credentials: aws.AnonymousCredentials, - HTTPClient: HTTPClient(), - Logger: Logger(), - Handlers: Handlers(), - } -} - -// HTTPClient will return a new HTTP Client configured for the SDK. -// -// Does not use http.DefaultClient nor http.DefaultTransport. -func HTTPClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - MaxIdleConns: 100, - MaxIdleConnsPerHost: 10, - IdleConnTimeout: 30 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 5 * time.Second, - }, - } -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client. -func Handlers() aws.Handlers { - var handlers aws.Handlers - - handlers.Validate.PushBackNamed(ValidateEndpointHandler) - handlers.Validate.PushBackNamed(ValidateParametersHandler) - handlers.Validate.AfterEachFn = aws.HandlerListStopOnError - handlers.Build.PushBackNamed(SDKVersionUserAgentHandler) - handlers.Build.PushBackNamed(AddHostExecEnvUserAgentHander) - handlers.Build.AfterEachFn = aws.HandlerListStopOnError - handlers.Sign.PushBackNamed(BuildContentLengthHandler) - handlers.Send.PushBackNamed(ValidateReqSigHandler) - handlers.Send.PushBackNamed(SendHandler) - handlers.AfterRetry.PushBackNamed(AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(ValidateResponseHandler) - - return handlers -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/handlers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/handlers.go deleted file mode 100644 index cebd006b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/handlers.go +++ /dev/null @@ -1,229 +0,0 @@ -package defaults - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. 
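Editor's note: `defaults.HTTPClient` above deliberately constructs a fresh client instead of sharing `http.DefaultClient`, so that changes made elsewhere in the program cannot leak into SDK traffic. A self-contained sketch of building such a dedicated client with the same transport settings:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// newHTTPClient builds a dedicated client; it never touches
// http.DefaultClient or http.DefaultTransport.
func newHTTPClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Proxy:                 http.ProxyFromEnvironment,
			MaxIdleConns:          100,
			MaxIdleConnsPerHost:   10,
			IdleConnTimeout:       30 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 5 * time.Second,
		},
	}
}

func main() {
	client := newHTTPClient()
	fmt.Printf("client configured with %T\n", client.Transport)
}
```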
-// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = aws.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *aws.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. -var ValidateReqSigHandler = aws.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *aws.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == aws.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 10 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(10 * time.Minute).After(time.Now()) { - return - } - - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = aws.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *aws.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if aws.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *aws.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *aws.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *aws.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 
301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(aws.ErrCodeRequestCanceled, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. -var ValidateResponseHandler = aws.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *aws.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = aws.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *aws.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || r.Config.EnforceShouldRetryCheck { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if err := sdk.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(aws.ErrCodeRequestCanceled, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if p, ok := r.Config.Credentials.(sdk.Invalidator); ok && r.IsErrorExpired() { - p.Invalidate() - } - - r.RetryCount++ - r.Error = nil - } -}} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. 
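Editor's note: `handleSendError` above recovers an HTTP status code from the text of a `*url.Error` when no response object is available (for example a redirect error surfaced as a string). A minimal sketch of that regexp extraction; the error message used here is illustrative only:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// reStatusCode mirrors the pattern used by handleSendError: the first three
// digits at the start of the error text are taken as the HTTP status code.
var reStatusCode = regexp.MustCompile(`^(\d{3})`)

func statusFromErrorText(msg string) (int, bool) {
	m := reStatusCode.FindStringSubmatch(msg)
	if m == nil {
		return 0, false
	}
	code, err := strconv.Atoi(m[1])
	if err != nil {
		return 0, false
	}
	return code, true
}

func main() {
	// Example of a redirect error reported only as text.
	if code, ok := statusFromErrorText("301 response missing Location header"); ok {
		fmt.Println(code) // 301
	}
}
```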
-var ValidateEndpointHandler = aws.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *aws.Request) { - if r.Metadata.SigningRegion == "" && r.Config.Region == "" { - r.Error = aws.ErrMissingRegion - } else if r.Metadata.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/param_validator.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/param_validator.go deleted file mode 100644 index cbac627a..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/param_validator.go +++ /dev/null @@ -1,19 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go-v2/aws" -) - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = aws.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *aws.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(aws.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/user_agent_handlers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/user_agent_handlers.go deleted file mode 100644 index c911f066..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/user_agent_handlers.go +++ /dev/null @@ -1,36 +0,0 @@ -package defaults - -import ( - "os" - "runtime" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version -// to the user agent. -var SDKVersionUserAgentHandler = aws.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: aws.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec_env` - -// AddHostExecEnvUserAgentHander is a request handler appending the SDK's -// execution environment to the user agent. -// -// If the environment variable AWS_EXECUTION_ENV is set, its value will be -// appended to the user agent string. -var AddHostExecEnvUserAgentHander = aws.NamedHandler{ - Name: "core.AddHostExecEnvUserAgentHander", - Fn: func(r *aws.Request) { - v := os.Getenv(execEnvVar) - if len(v) == 0 { - return - } - - aws.AddToUserAgent(r, execEnvUAKey+"/"+v) - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go deleted file mode 100644 index 4fcb6161..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. 
This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/api.go deleted file mode 100644 index 3233acd0..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/api.go +++ /dev/null @@ -1,162 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -// GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - op := &aws.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - op := &aws.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "user-data"), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.Handlers.UnmarshalError.PushBack(func(r *aws.Request) { - if r.HTTPResponse.StatusCode == http.StatusNotFound { - r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) - } - }) - - return output.Content, req.Send() -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. 
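Editor's note: `GetMetadata` and the related methods ultimately issue plain HTTP GETs against the instance metadata endpoint. A bare-bones sketch using only `net/http`; it assumes it runs on an EC2 instance where IMDSv1 is reachable at the standard `169.254.169.254` address, and omits retries and IMDSv2 token handling:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// fetchMetadata reads one path from the EC2 instance metadata service.
func fetchMetadata(path string) (string, error) {
	client := &http.Client{Timeout: 2 * time.Second}

	resp, err := client.Get("http://169.254.169.254/latest/meta-data/" + path)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("metadata request failed: %s", resp.Status)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

func main() {
	id, err := fetchMetadata("instance-id")
	if err != nil {
		fmt.Println("not on EC2 or IMDS unavailable:", err)
		return
	}
	fmt.Println("instance:", id)
}
```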
-func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - op := &aws.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New("SerializationError", - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") - if err != nil { - return "", err - } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. 
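Editor's note: `Region()` above derives the region by dropping the trailing zone letter from the availability-zone string. The same trick as a tiny standalone example:

```go
package main

import "fmt"

// regionFromZone strips the zone suffix: "us-west-2a" -> "us-west-2".
func regionFromZone(az string) string {
	if az == "" {
		return ""
	}
	return az[:len(az)-1]
}

func main() {
	fmt.Println(regionFromZone("us-west-2a"))    // us-west-2
	fmt.Println(regionFromZone("eu-central-1b")) // eu-central-1
}
```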
-func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/service.go deleted file mode 100644 index 5c0a7e69..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/service.go +++ /dev/null @@ -1,110 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -// -// This package's client can be disabled completely by setting the environment -// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to -// true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environemnt variable is set to true, (case insensitive). -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "os" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/aws/defaults" -) - -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" -const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *aws.Client -} - -// New creates a new instance of the EC2Metadata client with a Config. -// This client is safe to use across multiple goroutines. -// -// Example: -// // Create a EC2Metadata client from just a config. -// svc := ec2metadata.New(cfg) -func New(config aws.Config) *EC2Metadata { - svc := &EC2Metadata{ - Client: aws.NewClient( - config, - aws.Metadata{ - ServiceName: ServiceName, - APIVersion: "latest", - }, - ), - } - - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Disable the EC2 Metadata service if the environment variable is set. - // This shortcirctes the service's functionality to always fail to send - // requests. 
- if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { - svc.Handlers.Send.SwapNamed(aws.NamedHandler{ - Name: defaults.SendHandler.Name, - Fn: func(r *aws.Request) { - r.Error = awserr.New( - aws.ErrCodeRequestCanceled, - "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", - nil) - }, - }) - } - - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -func unmarshalHandler(r *aws.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } -} - -func unmarshalError(r *aws.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) -} - -func validateEndpointHandler(r *aws.Request) { - if r.Metadata.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds/provider.go deleted file mode 100644 index b8290bf2..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds/provider.go +++ /dev/null @@ -1,155 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/aws/ec2metadata" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A Provider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// The NewProvider function must be used to create the Provider. -// -// p := &ec2rolecreds.NewProvider(ec2metadata.New(cfg)) -// -// // Expire the credentials 10 minutes before IAM states they should. Proactivily -// // refreshing the credentials. -// p.ExpiryWindow = 10 * time.Minute -type Provider struct { - aws.SafeCredentialsProvider - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewProvider returns an initialized Provider value configured to retrieve -// credentials from EC2 Instance Metadata service. 
-func NewProvider(client *ec2metadata.EC2Metadata) *Provider { - p := &Provider{ - Client: client, - } - p.RetrieveFn = p.retrieveFn - - return p -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (p *Provider) retrieveFn() (aws.Credentials, error) { - credsList, err := requestCredList(p.Client) - if err != nil { - return aws.Credentials{}, err - } - - if len(credsList) == 0 { - return aws.Credentials{}, - awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(p.Client, credsName) - if err != nil { - return aws.Credentials{}, err - } - - creds := aws.Credentials{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - Source: ProviderName, - - CanExpire: true, - Expires: roleCreds.Expiration.Add(-p.ExpiryWindow), - } - - return creds, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "/iam/security-credentials" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New("SerializationError", - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpointcreds/provider.go deleted file mode 100644 index dac05347..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpointcreds/provider.go +++ /dev/null @@ -1,167 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. 
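Editor's note: `requestCredList` above splits the metadata response into role names with a `bufio.Scanner`, one name per line. A self-contained sketch of that parsing step using a canned response body:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseRoleNames reads newline-separated role names, the format returned by
// the iam/security-credentials metadata path.
func parseRoleNames(body string) ([]string, error) {
	var names []string
	s := bufio.NewScanner(strings.NewReader(body))
	for s.Scan() {
		names = append(names, s.Text())
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return names, nil
}

func main() {
	// Canned example body; a real instance usually lists a single role.
	names, _ := parseRoleNames("my-instance-role\nanother-role\n")
	fmt.Println(names) // [my-instance-role another-role]
}
```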
-// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "encoding/json" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -// Provider satisfies the aws.CredentialsProvider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - aws.SafeCredentialsProvider - - // The AWS Client to make HTTP requests to the endpoint with. The endpoint - // the request will be made to is provided by the aws.Config's - // EndpointResolver. - Client *aws.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// New returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func New(cfg aws.Config) *Provider { - p := &Provider{ - Client: aws.NewClient( - cfg, - aws.Metadata{ - ServiceName: ProviderName, - }, - ), - } - p.RetrieveFn = p.retrieveFn - - p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) - p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) - p.Client.Handlers.Validate.Clear() - p.Client.Handlers.Validate.PushBack(validateEndpointHandler) - - return p -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. 
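Editor's note: the package comment above documents the JSON shape an endpoint must return. A minimal sketch decoding that refreshable-credentials document with `encoding/json`; the field values are the placeholders from the documentation, and the struct name is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// endpointCredentials matches the documented response format; Expiration is
// optional and absent for static credentials.
type endpointCredentials struct {
	AccessKeyID     string     `json:"AccessKeyId"`
	SecretAccessKey string     `json:"SecretAccessKey"`
	Token           string     `json:"Token"`
	Expiration      *time.Time `json:"Expiration"`
}

func main() {
	body := `{
		"AccessKeyId":     "MUA...",
		"SecretAccessKey": "/7PC5om....",
		"Token":           "AQoDY....=",
		"Expiration":      "2016-02-25T06:03:31Z"
	}`

	var creds endpointCredentials
	if err := json.Unmarshal([]byte(body), &creds); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(creds.AccessKeyID, "expires", creds.Expiration.UTC())
}
```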
-func (p *Provider) retrieveFn() (aws.Credentials, error) { - resp, err := p.getCredentials() - if err != nil { - return aws.Credentials{}, - awserr.New("CredentialsEndpointError", "failed to load credentials", err) - } - - creds := aws.Credentials{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - Source: ProviderName, - } - - if resp.Expiration != nil { - creds.CanExpire = true - creds.Expires = resp.Expiration.Add(-p.ExpiryWindow) - } - - return creds, nil -} - -type getCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -type errorOutput struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { - op := &aws.Operation{ - Name: "GetCredentials", - HTTPMethod: "GET", - } - - out := &getCredentialsOutput{} - req := p.Client.NewRequest(op, nil, out) - req.HTTPRequest.Header.Set("Accept", "application/json") - - return out, req.Send() -} - -func validateEndpointHandler(r *aws.Request) { - if len(r.Metadata.Endpoint) == 0 { - r.Error = aws.ErrMissingEndpoint - } -} - -func unmarshalHandler(r *aws.Request) { - defer r.HTTPResponse.Body.Close() - - out := r.Data.(*getCredentialsOutput) - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, - ) - } -} - -func unmarshalError(r *aws.Request) { - defer r.HTTPResponse.Body.Close() - - var errOut errorOutput - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, - ) - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New(errOut.Code, errOut.Message, nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go deleted file mode 100644 index 3dad528d..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ /dev/null @@ -1,52 +0,0 @@ -package aws - -// EndpointResolver resolves an endpoint for a service endpoint id and region. -type EndpointResolver interface { - ResolveEndpoint(service, region string) (Endpoint, error) -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(service, region string) (Endpoint, error) - -// ResolveEndpoint calls EndpointResolverFunc returning the endpoint, or error. -func (fn EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { - return fn(service, region) -} - -// ResolveWithEndpoint allows a static Resolved Endpoint to be used as an endpoint resolver -type ResolveWithEndpoint Endpoint - -// ResolveWithEndpointURL allows a static URL to be used as a endpoint resolver. -func ResolveWithEndpointURL(url string) ResolveWithEndpoint { - return ResolveWithEndpoint(Endpoint{URL: url}) -} - -// ResolveEndpoint returns the static endpoint. 
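Editor's note: `EndpointResolverFunc` above is the common Go adapter that lets a bare function satisfy a one-method interface, in the same spirit as `http.HandlerFunc`. A self-contained sketch of the pattern with illustrative names; this is handy for stubbing endpoints in tests:

```go
package main

import "fmt"

type endpoint struct{ URL string }

// resolver is a one-method interface, analogous to the SDK's EndpointResolver.
type resolver interface {
	Resolve(service, region string) (endpoint, error)
}

// resolverFunc adapts a plain function to the resolver interface.
type resolverFunc func(service, region string) (endpoint, error)

func (fn resolverFunc) Resolve(service, region string) (endpoint, error) {
	return fn(service, region)
}

func main() {
	// A stub resolver for tests or local development, written inline.
	var r resolver = resolverFunc(func(service, region string) (endpoint, error) {
		return endpoint{URL: fmt.Sprintf("https://%s.%s.example.test", service, region)}, nil
	})

	ep, _ := r.Resolve("s3", "us-west-2")
	fmt.Println(ep.URL) // https://s3.us-west-2.example.test
}
```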
-func (v ResolveWithEndpoint) ResolveEndpoint(service, region string) (Endpoint, error) { - e := Endpoint(v) - e.SigningRegion = region - return e, nil -} - -// Endpoint represents the endpoint a service client should make requests to. -type Endpoint struct { - // The URL of the endpoint. - URL string - - // The service name that should be used for signing the requests to the - // endpoint. - SigningName string - - // The region that should be used for signing the request to the endpoint. - SigningRegion string - - // States that the signing name for this endpoint was derived from metadata - // passed in, but was not explicitly modeled. - SigningNameDerived bool - - // The signing method that should be used for signign the requests to the - // endpoint. - SigningMethod string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/decode.go deleted file mode 100644 index a38185f0..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/decode.go +++ /dev/null @@ -1,148 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (*Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. 
- modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (*Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return &Resolver{partitions: ps}, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custAddEC2Metadata(p) - custAddS3DualStack(p) - custSetUnresolveServices(p) - } - - return &Resolver{partitions: ps}, nil -} - -func custAddS3DualStack(p *partition) { - if p.ID != "aws" { - return - } - - custAddDualstack(p, "s3") - custAddDualstack(p, "s3-control") -} - -func custAddDualstack(p *partition, svcName string) { - s, ok := p.Services[svcName] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services[svcName] = s -} - -func custAddEC2Metadata(p *partition) { - p.Services["ec2metadata"] = service{ - IsRegionalized: boxedFalse, - PartitionEndpoint: "aws-global", - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - } -} - -func custSetUnresolveServices(p *partition) { - var ids = map[string]struct{}{ - "data.iot": {}, - "cloudsearchdomain": {}, - } - for id := range ids { - p.Services[id] = service{ - Defaults: endpoint{ - Unresolveable: boxedTrue, - }, - } - } -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/defaults.go deleted file mode 100644 index 24167e5f..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/defaults.go +++ /dev/null @@ -1,3902 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. -) - -// AWS Standard partition's regions. -const ( - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). - CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuNorth1RegionID = "eu-north-1" // EU (Stockholm). - EuWest1RegionID = "eu-west-1" // EU (Ireland). 
- EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). -) - -// Service identifiers -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudsearchdomainServiceID = "cloudsearchdomain" // Cloudsearchdomain. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DataIotServiceID = "data.iot" // DataIot. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. 
- ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - RobomakerServiceID = "robomaker" // Robomaker. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. 
- TaggingServiceID = "tagging" // Tagging. - TransferServiceID = "transfer" // Transfer. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) - -// NewDefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func NewDefaultResolver() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). -// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() Partitions { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. -func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "EU (Frankfurt)", - }, - "eu-north-1": region{ - Description: "EU (Stockholm)", - }, - "eu-west-1": region{ - Description: "EU (Ireland)", - }, - "eu-west-2": region{ - Description: "EU (London)", - }, - "eu-west-3": region{ - Description: "EU (Paris)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: 
endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "autoscaling-plans", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloud9": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: 
endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearchdomain": service{ - Defaults: endpoint{ - Unresolveable: boxedTrue, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - 
}, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - 
"eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-sync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "data.iot": service{ - Defaults: endpoint{ - Unresolveable: boxedTrue, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "discovery": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-west-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "events": 
service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - 
"sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediapackage": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": 
endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "robomaker": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "s3": service{ - 
PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: 
[]string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-east-2-fips": endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-west-2-fips": endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": 
endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, - }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - 
"ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - 
"eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "support": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "transfer": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. 
-func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "apigateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudsearchdomain": service{ - Defaults: endpoint{ - Unresolveable: boxedTrue, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "data.iot": service{ - Defaults: endpoint{ - Unresolveable: boxedTrue, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - 
}, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": 
endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). -func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", - }, - }, - Services: services{ - "acm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "cloudsearchdomain": service{ - Defaults: endpoint{ - Unresolveable: boxedTrue, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "data.iot": service{ - Defaults: endpoint{ - Unresolveable: boxedTrue, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - 
"us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - 
"us-gov-west-1": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: 
"dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/doc.go deleted file mode 100644 index faf8723f..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns a endpoint Resolver will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// . -// -// Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition. -// -// resolver := endpoints.NewDefaultResolver() -// partitions := resolver.Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic how -// endpoints are resolved. This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointsResolver field of aws.Config when initializing -// the service client. -// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.ResolveEndpoint, converting it to a type that satisfies the -// Resolver interface. 
-// -// -// defaultResolver := endpoints.NewDefaultResolver() -// myCustomResolver := func(service, region string) (aws.Endpoint, error) { -// if service == endpoints.S3ServiceID { -// return aws.Endpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return defaultResolver.ResolveEndpoint(service, region) -// } -// -// cfg, err := external.LoadDefaultAWSConfig() -// if err != nil { -// panic(err) -// } -// cfg.Region = "us-west-2" -// cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/endpoints.go deleted file mode 100644 index 194b57b6..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/endpoints.go +++ /dev/null @@ -1,344 +0,0 @@ -package endpoints - -import ( - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -// ResolveOptions provide the configuration needed to direct how the -// endpoints will be resolved. -type ResolveOptions struct { - // DisableSSL forces the endpoint to be resolved as HTTP. - // instead of HTTPS if the service supports it. - DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - UseDualStack bool - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. - StrictMatching bool -} - -// A Resolver provides endpoint resolution based on modeled endpoint data. -type Resolver struct { - ResolveOptions - - partitions partitions -} - -// ResolveEndpoint attempts to resolve an endpoint againsted the modeled endpoint -// data. If an endpoint is found it will be returned. An error will be returned -// otherwise. -// -// Searches through the partitions in the order they are defined. -func (r *Resolver) ResolveEndpoint(service, region string) (aws.Endpoint, error) { - return r.partitions.EndpointFor(service, region, r.ResolveOptions) -} - -// Partitions returns the partitions that make up the resolver. -func (r *Resolver) Partitions() Partitions { - return r.partitions.Partitions() -} - -// Partitions is a slice of paritions describing regions and endpoints -type Partitions []Partition - -// ForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. -func (ps Partitions) ForRegion(id string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[id]; ok || p.p.RegionRegex.MatchString(id) { - return p, true - } - } - - return Partition{}, false -} - -// ForPartition returns the parition with the matching ID passed in. 
-func (ps Partitions) ForPartition(id string) (Partition, bool) { - for _, p := range ps { - if p.ID() == id { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id string - p *partition -} - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// Endpoint attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the endpoint will be resolved -// based on the parition's endpoint pattern, and service endpoint prefix. -// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError -func (p Partition) Endpoint(service, region string, opts ResolveOptions) (aws.Endpoint, error) { - return p.p.EndpointFor(service, region, opts) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := map[string]Region{} - for id := range p.p.Regions { - rs[id] = Region{ - id: id, - p: p.p, - } - } - - return rs -} - -// RegionsForService returns the map of regions for the service id specified. -// false is returned if the service is not found in the partition. -func (p Partition) RegionsForService(id string) (map[string]Region, bool) { - if _, ok := p.p.Services[id]; !ok { - return nil, false - } - - s := Service{ - id: id, - p: p.p, - } - return s.Regions(), true -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. -func (p Partition) Services() map[string]Service { - ss := map[string]Service{} - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - return ss -} - -// Resolver returns an endpoint resolver for the partitions. Use this to satisfy -// the SDK's EndpointResolver. -func (p Partition) Resolver() *Resolver { - return &Resolver{ - partitions: partitions{*p.p}, - } -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Endpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) Endpoint(service string, opts ResolveOptions) (aws.Endpoint, error) { - return r.p.EndpointFor(service, r.id, opts) -} - -// Services returns a list of all services that are known to be in this region. 
-func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// Endpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) Endpoint(region string, opts ResolveOptions) (aws.Endpoint, error) { - return s.p.EndpointFor(s.id, region, opts) -} - -// Regions returns a map of Regions that the service is present in. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - for id := range s.p.Services[s.id].Endpoints { - if _, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Endpoints() map[string]Endpoint { - es := map[string]Endpoint{} - for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// A Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. -func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// Resolve resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. -func (e Endpoint) Resolve(opts ResolveOptions) (aws.Endpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts) -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. -type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. 
-func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. -type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model.go deleted file mode 100644 index d3bcbb80..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model.go +++ /dev/null @@ -1,313 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ResolveOptions) (aws.Endpoint, error) { - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opts.StrictMatching) { - continue - } - - return ps[i].EndpointFor(service, region, opts) - } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opts.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts) - } - - return aws.Endpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. 
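
Note: the deleted endpoints.go above defines the vendored v2 resolver API (Resolver, ResolveOptions, Partitions, and the UnknownServiceError/UnknownEndpointError types). As a minimal sketch of how that removed API was typically exercised — assuming the NewDefaultResolver constructor generated further down in v3model_codegen.go, the S3ServiceID constant referenced in the deleted doc.go, and an arbitrary example region — strict matching and error handling looked roughly like this:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/endpoints"
)

func main() {
	// NewDefaultResolver returns a *Resolver preloaded with the generated
	// partitions (aws, aws-cn, aws-us-gov). StrictMatching comes from the
	// embedded ResolveOptions, so it can be toggled directly on the resolver.
	resolver := endpoints.NewDefaultResolver()
	resolver.StrictMatching = true

	// With StrictMatching enabled, an unknown service/region pair is reported
	// as an error instead of being guessed from the partition's region pattern.
	ep, err := resolver.ResolveEndpoint(endpoints.S3ServiceID, "us-west-2")
	if err != nil {
		if ue, ok := err.(endpoints.UnknownEndpointError); ok {
			fmt.Println("no endpoint; known regions:", ue.Known)
			return
		}
		panic(err)
	}

	fmt.Println(ep.URL, ep.SigningRegion)
}
```

Without StrictMatching the resolver falls back to the partition's hostname pattern, which is why the doc comments above warn that a loosely resolved endpoint may look valid yet not exist.
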
-func (ps partitions) Partitions() Partitions { - parts := make(Partitions, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { - s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] - - if hasEndpoint && hasService { - return true - } - - if strictMatch { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func (p partition) EndpointFor(service, region string, opts ResolveOptions) (resolved aws.Endpoint, err error) { - s, hasService := p.Services[service] - if !hasService && opts.StrictMatching { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opts.StrictMatching { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) - } - - defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opts), nil -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es endpoints) []string { - list := make([]string, 0, len(es)) - for k := range es { - list = append(list, k) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string) (endpoint, bool) { - if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint - } - - if e, ok := s.Endpoints[region]; ok { - return e, true - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. 
- return endpoint{}, false -} - -type endpoints map[string]endpoint - -type endpoint struct { - // True if the endpoint cannot be resolved for this partition/region/service - Unresolveable boxedBool `json:"-"` - - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. - SSLCommonName string `json:"sslCommonName"` -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts ResolveOptions) aws.Endpoint { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - var u string - if e.Unresolveable != boxedTrue { - // Only attempt to resolve the endpoint if it can be resolved. - hostname := e.Hostname - - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - } - - u = strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - } - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - return aws.Endpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if other.Unresolveable != boxedBoolUnset { - e.Unresolveable = other.Unresolveable - } - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack - } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err 
!= nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 033afb26..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,330 +0,0 @@ -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model definition. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) - - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver.partitions); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = 
struct{}{} - } - } - - return set -} - -var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" . }} - - {{ range $_, $partition := . }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ template "service consts" . }} - - {{ template "endpoint resolvers" . }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // NewDefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func NewDefaultResolver() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() Partitions { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . 
-}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . -}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -endpoints{ - {{ range $id, $endpoint := . -}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ BoxedBoolIfSet "Unresolveable: %s,\n" .Unresolveable -}} - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go deleted file mode 100644 index c5671378..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go-v2/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - // - // @readonly - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - // - // @readonly - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/config.go deleted file mode 100644 index 5753b432..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/config.go +++ /dev/null @@ -1,132 +0,0 @@ -package external - -import ( - "github.com/aws/aws-sdk-go-v2/aws" -) - -// DefaultConfigLoaders are a slice of functions that will read external configuration -// sources for configuration values. These values are read by the AWSConfigResolvers -// using interfaces to extract specific information from the external configuration. -var DefaultConfigLoaders = []ConfigLoader{ - LoadEnvConfig, - LoadSharedConfigIgnoreNotExist, -} - -// DefaultAWSConfigResolvers are a slice of functions that will resolve external -// configuration values into AWS configuration values. 
-// -// This will setup the AWS configuration's Region, -var DefaultAWSConfigResolvers = []AWSConfigResolver{ - ResolveDefaultAWSConfig, - ResolveCustomCABundle, - - ResolveRegion, - - ResolveFallbackEC2Credentials, // Initial defauilt credentails provider. - ResolveCredentialsValue, - ResolveEndpointCredentials, - ResolveContainerEndpointPathCredentials, // TODO is this order right? - ResolveAssumeRoleCredentials, -} - -// A Config represents a generic configuration value or set of values. This type -// will be used by the AWSConfigResolvers to extract -// -// General the Config type will use type assertion against the Provider interfaces -// to extract specific data from the Config. -type Config interface{} - -// A ConfigLoader is used to load external configuration data and returns it as -// a generic Config type. -// -// The loader should return an error if it fails to load the external configuration -// or the configuration data is malformed, or required components missing. -type ConfigLoader func(Configs) (Config, error) - -// An AWSConfigResolver will extract configuration data from the Configs slice -// using the provider interfaces to extract specific functionality. The extracted -// configuration values will be written to the AWS Config value. -// -// The resolver should return an error if it it fails to extract the data, the -// data is malformed, or incomplete. -type AWSConfigResolver func(cfg *aws.Config, configs Configs) error - -// Configs is a slice of Config values. These values will be used by the -// AWSConfigResolvers to extract external configuration values to populate the -// AWS Config type. -// -// Use AppendFromLoaders to add additional external Config values that are -// loaded from external sources. -// -// Use ResolveAWSConfig after external Config values have been added or loaded -// to extract the loaded configuration values into the AWS Config. -type Configs []Config - -// AppendFromLoaders iterates over the slice of loaders passed in calling each -// loader function in order. The external config value returned by the loader -// will be added to the returned Configs slice. -// -// If a loader returns an error this method will stop iterating and return -// that error. -func (cs Configs) AppendFromLoaders(loaders []ConfigLoader) (Configs, error) { - for _, fn := range loaders { - cfg, err := fn(cs) - if err != nil { - return nil, err - } - - cs = append(cs, cfg) - } - - return cs, nil -} - -// ResolveAWSConfig returns a AWS configuration populated with values by calling -// the resolvers slice passed in. Each resolver is called in order. Any resolver -// may overwrite the AWs Configuration value of a previous resolver. -// -// If an resolver returns an error this method will return that error, and stop -// iterating over the resolvers. -func (cs Configs) ResolveAWSConfig(resolvers []AWSConfigResolver) (aws.Config, error) { - var cfg aws.Config - - for _, fn := range resolvers { - if err := fn(&cfg, cs); err != nil { - // TODO provide better error? - return aws.Config{}, err - } - } - - return cfg, nil -} - -// LoadDefaultAWSConfig reads the SDK's default external configurations, and -// populates an AWS Config with the values from the external configurations. -// -// An optional variadic set of additional Config values can be provided as input -// that will be prepended to the Configs slice. Use this to add custom configuration. 
-// The custom configurations must satisfy the respective providers for their data -// or the custom data will be ignored by the resolvers and config loaders. -// -// cfg, err := external.LoadDefaultAWSConfig( -// WithSharedConfigProfile("test-profile"), -// ) -// if err != nil { -// panic(fmt.Sprintf("failed loading config, %v", err)) -// } -// -// -// The default configuration sources are: -// * Environment Variables -// * Shared Configuration and Shared Credentials files. -func LoadDefaultAWSConfig(configs ...Config) (aws.Config, error) { - var cfgs Configs - cfgs = append(cfgs, configs...) - - cfgs, err := cfgs.AppendFromLoaders(DefaultConfigLoaders) - if err != nil { - return aws.Config{}, err - } - - return cfgs.ResolveAWSConfig(DefaultAWSConfigResolvers) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/env_config.go deleted file mode 100644 index 902252c4..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/env_config.go +++ /dev/null @@ -1,236 +0,0 @@ -package external - -import ( - "io/ioutil" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// CredentialsSourceName provides a name of the provider when config is -// loaded from environment. -const CredentialsSourceName = "EnvConfigCredentials" - -// Environment variables that will be read for configuration values. -const ( - AWSAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" - AWSAccessKeyEnvVar = "AWS_ACCESS_KEY" - - AWSSecreteAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" - AWSSecreteKeyEnvVar = "AWS_SECRET_KEY" - - AWSSessionTokenEnvVar = "AWS_SESSION_TOKEN" - - AWSCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - - // TODO shorter name? - AWSContainerCredentialsEndpointPathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" - - AWSRegionEnvVar = "AWS_REGION" - AWSDefaultRegionEnvVar = "AWS_DEFAULT_REGION" - - AWSProfileEnvVar = "AWS_PROFILE" - AWSDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE" - - AWSSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" - - AWSConfigFileEnvVar = "AWS_CONFIG_FILE" - - AWSCustomCABundleEnvVar = "AWS_CA_BUNDLE" -) - -var ( - credAccessEnvKeys = []string{ - AWSAccessKeyIDEnvVar, - AWSAccessKeyEnvVar, - } - credSecretEnvKeys = []string{ - AWSSecreteAccessKeyEnvVar, - AWSSecreteKeyEnvVar, - } - regionEnvKeys = []string{ - AWSRegionEnvVar, - AWSDefaultRegionEnvVar, - } - profileEnvKeys = []string{ - AWSProfileEnvVar, - AWSDefaultProfileEnvVar, - } -) - -// EnvConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type EnvConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Credentials aws.Credentials - - // TODO doc - CredentialsEndpoint string - - // TODO doc, shorter name? - ContainerCredentialsEndpointPath string - - // Region value will instruct the SDK where to make service API requests to. 
If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-west-2 - // AWS_DEFAULT_REGION=us-west-2 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // AWS_DEFAULT_PROFILE=my_profile - SharedConfigProfile string - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. - // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the config. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string -} - -// LoadEnvConfig reads configuration values from the OS's environment variables. -// Returning the a Config typed EnvConfig to satisfy the ConfigLoader func type. -func LoadEnvConfig(cfgs Configs) (Config, error) { - return NewEnvConfig() -} - -// NewEnvConfig retrieves the SDK's environment configuration. -// See `EnvConfig` for the values that will be retrieved. -func NewEnvConfig() (EnvConfig, error) { - var cfg EnvConfig - - creds := aws.Credentials{ - Source: CredentialsSourceName, - } - setFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) - setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) - if creds.HasKeys() { - creds.SessionToken = os.Getenv(AWSSessionTokenEnvVar) - cfg.Credentials = creds - } - - cfg.CredentialsEndpoint = os.Getenv(AWSCredentialsEndpointEnvVar) - cfg.ContainerCredentialsEndpointPath = os.Getenv(AWSContainerCredentialsEndpointPathEnvVar) - - setFromEnvVal(&cfg.Region, regionEnvKeys) - setFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) - - cfg.SharedCredentialsFile = os.Getenv(AWSSharedCredentialsFileEnvVar) - cfg.SharedConfigFile = os.Getenv(AWSConfigFileEnvVar) - - cfg.CustomCABundle = os.Getenv(AWSCustomCABundleEnvVar) - - return cfg, nil -} - -// GetRegion returns the AWS Region if set in the environment. Returns an empty -// string if not set. -func (c EnvConfig) GetRegion() (string, error) { - return c.Region, nil -} - -// GetCredentialsValue returns the AWS Credentials if both AccessKey and ScreteAccessKey -// are set in the environment. 
Returns a zero value Credentials if not set. -func (c EnvConfig) GetCredentialsValue() (aws.Credentials, error) { - return c.Credentials, nil -} - -// GetSharedConfigProfile returns the shared config profile if set in the -// environment. Returns an empty string if not set. -func (c EnvConfig) GetSharedConfigProfile() (string, error) { - return c.SharedConfigProfile, nil -} - -// GetCredentialsEndpoint returns the credentials endpoint string if set. -func (c EnvConfig) GetCredentialsEndpoint() (string, error) { - return c.CredentialsEndpoint, nil -} - -// GetContainerCredentialsEndpointPath returns the container credentails endpoint -// path string if set. -func (c EnvConfig) GetContainerCredentialsEndpointPath() (string, error) { - return c.ContainerCredentialsEndpointPath, nil -} - -// GetSharedConfigFiles returns a slice of filenames set in the environment. -// -// Will return the filenames in the order of: -// * Shared Credentials -// * Shared Config -func (c EnvConfig) GetSharedConfigFiles() ([]string, error) { - files := make([]string, 0, 2) - if v := c.SharedCredentialsFile; len(v) > 0 { - files = append(files, v) - } - if v := c.SharedConfigFile; len(v) > 0 { - files = append(files, v) - } - - return files, nil -} - -// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was -func (c EnvConfig) GetCustomCABundle() ([]byte, error) { - if len(c.CustomCABundle) == 0 { - return nil, nil - } - - return ioutil.ReadFile(c.CustomCABundle) -} - -func setFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - *dst = v - break - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/http_client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/http_client.go deleted file mode 100644 index 71e9830b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/http_client.go +++ /dev/null @@ -1,42 +0,0 @@ -package external - -import ( - "crypto/tls" - "crypto/x509" - "net/http" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -func addHTTPClientCABundle(client *http.Client, pemCerts []byte) error { - var t *http.Transport - - switch v := client.Transport.(type) { - case *http.Transport: - t = v - default: - if client.Transport != nil { - return awserr.New("LoadCustomCABundleError", - "unable to set custom CA bundle trasnsport must be http.Transport type", nil) - } - } - - if t == nil { - t = &http.Transport{} - } - if t.TLSClientConfig == nil { - t.TLSClientConfig = &tls.Config{} - } - if t.TLSClientConfig.RootCAs == nil { - t.TLSClientConfig.RootCAs = x509.NewCertPool() - } - - if !t.TLSClientConfig.RootCAs.AppendCertsFromPEM(pemCerts) { - return awserr.New("LoadCustomCABundleError", - "failed to load custom CA bundle PEM file", nil) - } - - client.Transport = t - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/local.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/local.go deleted file mode 100644 index a555f42c..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/local.go +++ /dev/null @@ -1,49 +0,0 @@ -package external - -import ( - "fmt" - "net" - "net/url" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - 
return false, nil - } - } - - return true, nil -} - -func validateLocalURL(v string) error { - u, err := url.Parse(v) - if err != nil { - return err - } - - host := aws.URLHostname(u) - if len(host) == 0 { - return fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if isLoopback, err := isLoopbackHost(host); err != nil { - return fmt.Errorf("failed to resolve host %q, %v", host, err) - } else if !isLoopback { - return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/provider.go deleted file mode 100644 index 54fe9324..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/provider.go +++ /dev/null @@ -1,325 +0,0 @@ -package external - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/ec2metadata" -) - -// SharedConfigProfileProvider provides access to the shared config profile -// name external configuration value. -type SharedConfigProfileProvider interface { - GetSharedConfigProfile() (string, error) -} - -// WithSharedConfigProfile wraps a strings to satisfy the SharedConfigProfileProvider -// interface so a slice of custom shared config files ared used when loading the -// SharedConfig. -type WithSharedConfigProfile string - -// GetSharedConfigProfile returns the shared config profile. -func (c WithSharedConfigProfile) GetSharedConfigProfile() (string, error) { - return string(c), nil -} - -// GetSharedConfigProfile searchds the Configs for a SharedConfigProfileProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func GetSharedConfigProfile(configs Configs) (string, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(SharedConfigProfileProvider); ok { - v, err := p.GetSharedConfigProfile() - if err != nil { - return "", false, err - } - if len(v) > 0 { - return v, true, nil - } - } - } - - return "", false, nil -} - -// SharedConfigFilesProvider provides access to the shared config filesnames -// external configuration value. -type SharedConfigFilesProvider interface { - GetSharedConfigFiles() ([]string, error) -} - -// WithSharedConfigFiles wraps a slice of strings to satisfy the -// SharedConfigFilesProvider interface so a slice of custom shared config files -// ared used when loading the SharedConfig. -type WithSharedConfigFiles []string - -// GetSharedConfigFiles returns the slice of shared config files. -func (c WithSharedConfigFiles) GetSharedConfigFiles() ([]string, error) { - return []string(c), nil -} - -// GetSharedConfigFiles searchds the Configs for a SharedConfigFilesProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func GetSharedConfigFiles(configs Configs) ([]string, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(SharedConfigFilesProvider); ok { - v, err := p.GetSharedConfigFiles() - if err != nil { - return nil, false, err - } - if len(v) > 0 { - return v, true, nil - } - } - } - - return nil, false, nil -} - -// CustomCABundleProvider provides access to the custom CA bundle PEM bytes. -type CustomCABundleProvider interface { - GetCustomCABundle() ([]byte, error) -} - -// WithCustomCABundle provides wrapping of a region string to satisfy the -// CustomCABundleProvider interface. 
-type WithCustomCABundle []byte - -// GetCustomCABundle returns the CA bundle PEM bytes. -func (v WithCustomCABundle) GetCustomCABundle() ([]byte, error) { - return []byte(v), nil -} - -// GetCustomCABundle searchds the Configs for a CustomCABundleProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func GetCustomCABundle(configs Configs) ([]byte, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(CustomCABundleProvider); ok { - v, err := p.GetCustomCABundle() - if err != nil { - return nil, false, err - } - if len(v) > 0 { - return v, true, nil - } - } - } - - return nil, false, nil -} - -// RegionProvider provides access to the region external configuration value. -type RegionProvider interface { - GetRegion() (string, error) -} - -// WithRegion provides wrapping of a region string to satisfy the RegionProvider -// interface. -type WithRegion string - -// GetRegion returns the region string. -func (v WithRegion) GetRegion() (string, error) { - return string(v), nil -} - -// GetRegion searchds the Configs for a RegionProvider and returns the value -// if found. Returns an error if a provider fails before a value is found. -func GetRegion(configs Configs) (string, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(RegionProvider); ok { - v, err := p.GetRegion() - if err != nil { - return "", false, err - } - if len(v) > 0 { - return v, true, nil - } - } - } - - return "", false, nil -} - -// CredentialsValueProvider provides access to the credentials external -// configuration value. -type CredentialsValueProvider interface { - GetCredentialsValue() (aws.Credentials, error) -} - -// WithCredentialsValue provides wrapping of a credentials Value to satisfy the -// CredentialsValueProvider interface. -type WithCredentialsValue aws.Credentials - -// GetCredentialsValue returns the credentials value. -func (v WithCredentialsValue) GetCredentialsValue() (aws.Credentials, error) { - return aws.Credentials(v), nil -} - -// GetCredentialsValue searchds the Configs for a CredentialsValueProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func GetCredentialsValue(configs Configs) (aws.Credentials, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(CredentialsValueProvider); ok { - v, err := p.GetCredentialsValue() - if err != nil { - return aws.Credentials{}, false, err - } - if v.HasKeys() { - return v, true, nil - } - } - } - - return aws.Credentials{}, false, nil -} - -// CredentialsEndpointProvider provides access to the credentials endpoint -// external configuration value. -type CredentialsEndpointProvider interface { - GetCredentialsEndpoint() (string, error) -} - -// WithCredentialsEndpoint provides wrapping of a string to satisfy the -// CredentialsEndpointProvider interface. -type WithCredentialsEndpoint string - -// GetCredentialsEndpoint returns the endpoint. -func (p WithCredentialsEndpoint) GetCredentialsEndpoint() (string, error) { - return string(p), nil -} - -// GetCredentialsEndpoint searchds the Configs for a CredentialsEndpointProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. 
-func GetCredentialsEndpoint(configs Configs) (string, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(CredentialsEndpointProvider); ok { - v, err := p.GetCredentialsEndpoint() - if err != nil { - return "", false, err - } - if len(v) > 0 { - return v, true, nil - } - } - } - - return "", false, nil -} - -// ContainerCredentialsEndpointPathProvider provides access to the credentials endpoint path -// external configuration value. -type ContainerCredentialsEndpointPathProvider interface { - GetContainerCredentialsEndpointPath() (string, error) -} - -// WithContainerCredentialsEndpointPath provides wrapping of a string to satisfy the -// ContainerCredentialsEndpointPathProvider interface. -type WithContainerCredentialsEndpointPath string - -// GetContainerCredentialsEndpointPath returns the endpoint path. -func (p WithContainerCredentialsEndpointPath) GetContainerCredentialsEndpointPath() (string, error) { - return string(p), nil -} - -// GetContainerCredentialsEndpointPath searchds the Configs for a -// ContainerCredentialsEndpointPathProvider and returns the value if found. -// Returns an error if a provider fails before a -// value is found. -func GetContainerCredentialsEndpointPath(configs Configs) (string, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(ContainerCredentialsEndpointPathProvider); ok { - v, err := p.GetContainerCredentialsEndpointPath() - if err != nil { - return "", false, err - } - if len(v) > 0 { - return v, true, nil - } - } - } - - return "", false, nil -} - -// AssumeRoleConfigProvider provides access to the assume role config -// external configuration value. -type AssumeRoleConfigProvider interface { - GetAssumeRoleConfig() (AssumeRoleConfig, error) -} - -// WithAssumeRoleConfig provides wrapping of a string to satisfy the -// AssumeRoleConfigProvider interface. -type WithAssumeRoleConfig AssumeRoleConfig - -// GetAssumeRoleConfig returns the AssumeRoleConfig. -func (p WithAssumeRoleConfig) GetAssumeRoleConfig() (AssumeRoleConfig, error) { - return AssumeRoleConfig(p), nil -} - -// GetAssumeRoleConfig searchds the Configs for a AssumeRoleConfigProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func GetAssumeRoleConfig(configs Configs) (AssumeRoleConfig, bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(AssumeRoleConfigProvider); ok { - v, err := p.GetAssumeRoleConfig() - if err != nil { - return AssumeRoleConfig{}, false, err - } - if len(v.RoleARN) > 0 && v.Source != nil { - return v, true, nil - } - } - } - - return AssumeRoleConfig{}, false, nil -} - -// MFATokenFuncProvider provides access to the MFA token function needed for -// Assume Role with MFA. -type MFATokenFuncProvider interface { - GetMFATokenFunc() (func() (string, error), error) -} - -// WithMFATokenFunc provides wrapping of a string to satisfy the -// MFATokenFuncProvider interface. -type WithMFATokenFunc func() (string, error) - -// GetMFATokenFunc returns the MFA Token function. -func (p WithMFATokenFunc) GetMFATokenFunc() (func() (string, error), error) { - return p, nil -} - -// GetMFATokenFunc searchds the Configs for a MFATokenFuncProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. 
-func GetMFATokenFunc(configs Configs) (func() (string, error), bool, error) { - for _, cfg := range configs { - if p, ok := cfg.(MFATokenFuncProvider); ok { - v, err := p.GetMFATokenFunc() - if err != nil { - return nil, false, err - } - if v != nil { - return v, true, nil - } - } - } - - return nil, false, nil -} - -// WithEC2MetadataRegion provides a RegionProvider that retrieves the region -// from the EC2 Metadata service. -// -// TODO add this provider to the default config loading? -type WithEC2MetadataRegion struct { - Client *ec2metadata.EC2Metadata -} - -// GetRegion attempts to retreive the region from EC2 Metadata service. -func (p WithEC2MetadataRegion) GetRegion() (string, error) { - return p.Client.Region() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/resolve.go deleted file mode 100644 index 9eff51ad..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/resolve.go +++ /dev/null @@ -1,216 +0,0 @@ -package external - -import ( - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - "github.com/aws/aws-sdk-go-v2/aws/ec2metadata" - "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/aws/endpointcreds" - "github.com/aws/aws-sdk-go-v2/aws/stscreds" - "github.com/aws/aws-sdk-go-v2/service/sts" -) - -// ResolveDefaultAWSConfig will write default configuration values into the cfg -// value. It will write the default values, overwriting any previous value. -// -// This should be used as the first resolver in the slice of resolvers when -// resolving external configuration. -func ResolveDefaultAWSConfig(cfg *aws.Config, configs Configs) error { - *cfg = defaults.Config() - return nil -} - -// ResolveCustomCABundle extracts the first instance of a custom CA bundle filename -// from the external configurations. It will update the HTTP Client's builder -// to be configured with the custom CA bundle. -// -// Config provider used: -// * CustomCABundleProvider -func ResolveCustomCABundle(cfg *aws.Config, configs Configs) error { - v, found, err := GetCustomCABundle(configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - return addHTTPClientCABundle(cfg.HTTPClient, v) -} - -// ResolveRegion extracts the first instance of a Region from the Configs slice. -// -// Config providers used: -// * RegionProvider -func ResolveRegion(cfg *aws.Config, configs Configs) error { - v, found, err := GetRegion(configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - cfg.Region = v - return nil -} - -// ResolveCredentialsValue extracts the first instance of Credentials from the -// config slices. -// -// Config providers used: -// * CredentialsValueProvider -func ResolveCredentialsValue(cfg *aws.Config, configs Configs) error { - v, found, err := GetCredentialsValue(configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - cfg.Credentials = aws.StaticCredentialsProvider{Value: v} - - return nil -} - -// ResolveEndpointCredentials will extract the credentials endpoint from the config -// slice. 
Using the endpoint, provided, to create a endpoint credential provider. -// -// Config providers used: -// * CredentialsEndpointProvider -func ResolveEndpointCredentials(cfg *aws.Config, configs Configs) error { - v, found, err := GetCredentialsEndpoint(configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - if err := validateLocalURL(v); err != nil { - return err - } - - cfgCp := cfg.Copy() - cfgCp.EndpointResolver = aws.ResolveWithEndpointURL(v) - - provider := endpointcreds.New(cfgCp) - provider.ExpiryWindow = 5 * time.Minute - - cfg.Credentials = provider - - return nil -} - -const containerCredentialsEndpoint = "http://169.254.170.2" - -// ResolveContainerEndpointPathCredentials will extract the container credentials -// endpoint from the config slice. Using the endpoint provided, to create a -// endpoint credential provider. -// -// Config providers used: -// * ContainerCredentialsEndpointPathProvider -func ResolveContainerEndpointPathCredentials(cfg *aws.Config, configs Configs) error { - v, found, err := GetContainerCredentialsEndpointPath(configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - cfgCp := cfg.Copy() - - v = containerCredentialsEndpoint + v - cfgCp.EndpointResolver = aws.ResolveWithEndpointURL(v) - - provider := endpointcreds.New(cfgCp) - provider.ExpiryWindow = 5 * time.Minute - - cfg.Credentials = provider - - return nil -} - -// ResolveAssumeRoleCredentials extracts the assume role configuration from -// the external configurations. -// -// Config providers used: -func ResolveAssumeRoleCredentials(cfg *aws.Config, configs Configs) error { - v, found, err := GetAssumeRoleConfig(configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - cfgCp := cfg.Copy() - // TODO support additional credential providers that are already set? - cfgCp.Credentials = aws.StaticCredentialsProvider{Value: v.Source.Credentials} - - provider := stscreds.NewAssumeRoleProvider( - sts.New(cfgCp), v.RoleARN, - ) - provider.RoleSessionName = v.RoleSessionName - - if id := v.ExternalID; len(id) > 0 { - provider.ExternalID = aws.String(id) - } - if len(v.MFASerial) > 0 { - tp, foundTP, err := GetMFATokenFunc(configs) - if err != nil { - return err - } - if !foundTP { - return fmt.Errorf("token provider required for AssumeRole with MFA") - } - provider.SerialNumber = aws.String(v.MFASerial) - provider.TokenProvider = tp - } - - cfg.Credentials = provider - - return nil -} - -// ResolveFallbackEC2Credentials will configure the AWS config credentials to -// use EC2 Instance Role always. 
-func ResolveFallbackEC2Credentials(cfg *aws.Config, configs Configs) error { - cfgCp := cfg.Copy() - cfgCp.HTTPClient = shallowCopyHTTPClient(cfgCp.HTTPClient) - cfgCp.HTTPClient.Timeout = 5 * time.Second - - provider := ec2rolecreds.NewProvider(ec2metadata.New(cfgCp)) - provider.ExpiryWindow = 5 * time.Minute - - cfg.Credentials = provider - - return nil -} - -func shallowCopyHTTPClient(client *http.Client) *http.Client { - return &http.Client{ - Transport: client.Transport, - CheckRedirect: client.CheckRedirect, - Jar: client.Jar, - Timeout: client.Timeout, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/shared_config.go deleted file mode 100644 index c6dc6241..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/shared_config.go +++ /dev/null @@ -1,425 +0,0 @@ -package external - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/internal/ini" -) - -const ( - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - - // Additional Config fields - regionKey = `region` -) - -// DefaultSharedConfigProfile is the default profile to be used when -// loading configuration from the config files if another profile name -// is not provided. -var DefaultSharedConfigProfile = `default` - -// DefaultSharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func DefaultSharedCredentialsFilename() string { - return filepath.Join(userHomeDir(), ".aws", "credentials") -} - -// DefaultSharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func DefaultSharedConfigFilename() string { - return filepath.Join(userHomeDir(), ".aws", "config") -} - -// DefaultSharedConfigFiles is a slice of the default shared config files that -// the will be used in order to load the SharedConfig. -var DefaultSharedConfigFiles = []string{ - DefaultSharedCredentialsFilename(), - DefaultSharedConfigFilename(), -} - -// AssumeRoleConfig provides the values defining the configuration for an IAM -// assume role. -type AssumeRoleConfig struct { - RoleARN string - ExternalID string - MFASerial string - RoleSessionName string - - sourceProfile string - Source *SharedConfig -} - -// SharedConfig represents the configuration fields of the SDK config files. -type SharedConfig struct { - Profile string - - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. 
- // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. - // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Credentials aws.Credentials - - AssumeRole AssumeRoleConfig - - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. - // - // region - Region string -} - -// GetRegion returns the region for the profile if a region is set. -func (c SharedConfig) GetRegion() (string, error) { - return c.Region, nil -} - -// GetCredentialsValue returns the credentials for a profile if they were set. -func (c SharedConfig) GetCredentialsValue() (aws.Credentials, error) { - return c.Credentials, nil -} - -// GetAssumeRoleConfig returns the assume role config for a profile. Will be -// a zero value if not set. -func (c SharedConfig) GetAssumeRoleConfig() (AssumeRoleConfig, error) { - return c.AssumeRole, nil -} - -// LoadSharedConfigIgnoreNotExist is an alias for LoadSharedConfig with the -// addition of ignoring when none of the files exist or when the profile -// is not found in any of the files. -func LoadSharedConfigIgnoreNotExist(configs Configs) (Config, error) { - cfg, err := LoadSharedConfig(configs) - if err != nil { - if _, ok := err.(SharedConfigNotExistErrors); ok { - return SharedConfig{}, nil - } - return nil, err - } - - return cfg, nil -} - -// LoadSharedConfig uses the Configs passed in to load the SharedConfig from file -// The file names and profile name are sourced from the Configs. -// -// If profile name is not provided DefaultSharedConfigProfile (default) will -// be used. -// -// If shared config filenames are not provided DefaultSharedConfigFiles will -// be used. -// -// Config providers used: -// * SharedConfigProfileProvider -// * SharedConfigFilesProvider -func LoadSharedConfig(configs Configs) (Config, error) { - var profile string - var files []string - var ok bool - var err error - - profile, ok, err = GetSharedConfigProfile(configs) - if err != nil { - return nil, err - } - if !ok { - profile = DefaultSharedConfigProfile - } - - files, ok, err = GetSharedConfigFiles(configs) - if err != nil { - return nil, err - } - if !ok { - files = DefaultSharedConfigFiles - } - - return NewSharedConfig(profile, files) -} - -// NewSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. 
-func NewSharedConfig(profile string, filenames []string) (SharedConfig, error) { - if len(filenames) == 0 { - return SharedConfig{}, fmt.Errorf("no shared config files provided") - } - - files, err := loadSharedConfigIniFiles(filenames) - if err != nil { - return SharedConfig{}, err - } - - cfg := SharedConfig{} - if err = cfg.setFromIniFiles(profile, files); err != nil { - return SharedConfig{}, err - } - - if len(cfg.AssumeRole.sourceProfile) > 0 { - if err := cfg.setAssumeRoleSource(profile, files); err != nil { - return SharedConfig{}, err - } - } - - return cfg, nil -} - -type sharedConfigFile struct { - Filename string - IniData ini.Sections -} - -func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { - files := make([]sharedConfigFile, 0, len(filenames)) - - errs := SharedConfigNotExistErrors{} - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { - errs = append(errs, - SharedConfigFileNotExistError{Filename: filename, Err: err}, - ) - // Skip files which can't be opened and read for whatever reason - continue - } else if err != nil { - return nil, SharedConfigLoadError{Filename: filename, Err: err} - } - - files = append(files, sharedConfigFile{ - Filename: filename, IniData: sections, - }) - } - - if len(files) == 0 { - return nil, errs - } - - return files, nil -} - -func (c *SharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { - var assumeRoleSrc SharedConfig - - // Multiple level assume role chains are not support - if c.AssumeRole.sourceProfile == origProfile { - assumeRoleSrc = *c - assumeRoleSrc.AssumeRole = AssumeRoleConfig{} - } else { - err := assumeRoleSrc.setFromIniFiles(c.AssumeRole.sourceProfile, files) - if err != nil { - return SharedConfigAssumeRoleError{ - Profile: c.Profile, - RoleARN: c.AssumeRole.RoleARN, - Err: err, - } - } - } - - if len(assumeRoleSrc.Credentials.AccessKeyID) == 0 { - return SharedConfigAssumeRoleError{ - Profile: c.Profile, - RoleARN: c.AssumeRole.RoleARN, - Err: fmt.Errorf("source profile has no shared credentials"), - } - } - - c.AssumeRole.Source = &assumeRoleSrc - - return nil -} - -// Returns an error if all of the files fail to load. If at least one file is -// successfully loaded and contains the profile, no error will be returned. -func (c *SharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { - c.Profile = profile - - existErrs := SharedConfigNotExistErrors{} - for _, f := range files { - if err := c.setFromIniFile(profile, f); err != nil { - if _, ok := err.(SharedConfigProfileNotExistError); ok { - existErrs = append(existErrs, err) - continue - } - return err - } - } - - if len(existErrs) == len(files) { - return existErrs - } - - return nil -} - -// setFromFile loads the configuration from the file using -// the profile provided. A SharedConfig pointer type value is used so that -// multiple config file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. 
-func (c *SharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { - section, ok := file.IniData.GetSection(profile) - if !ok { - // Fallback to to alternate profile name: profile - section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if !ok { - return SharedConfigProfileNotExistError{ - Filename: file.Filename, - Profile: profile, - Err: nil, - } - } - } - - // Shared Credentials - akid := section.String(accessKeyIDKey) - secret := section.String(secretAccessKey) - if len(akid) > 0 && len(secret) > 0 { - c.Credentials = aws.Credentials{ - AccessKeyID: akid, - SecretAccessKey: secret, - SessionToken: section.String(sessionTokenKey), - Source: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } - } - - // Assume Role - roleArn := section.String(roleArnKey) - srcProfile := section.String(sourceProfileKey) - if len(roleArn) > 0 && len(srcProfile) > 0 { - c.AssumeRole = AssumeRoleConfig{ - RoleARN: roleArn, - ExternalID: section.String(externalIDKey), - MFASerial: section.String(mfaSerialKey), - RoleSessionName: section.String(roleSessionNameKey), - - sourceProfile: srcProfile, - } - } - - // Region - if v := section.String(regionKey); len(v) > 0 { - c.Region = v - } - - return nil -} - -// SharedConfigNotExistErrors provides an error type for failure to load shared -// config because resources do not exist. -type SharedConfigNotExistErrors []error - -func (es SharedConfigNotExistErrors) Error() string { - msg := "failed to load shared config\n" - for _, e := range es { - msg += "\t" + e.Error() - } - return msg -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Cause is the underlying error that caused the failure. -func (e SharedConfigLoadError) Cause() error { - return e.Err -} - -func (e SharedConfigLoadError) Error() string { - return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err) -} - -// SharedConfigFileNotExistError is an error for the shared config when -// the filename does not exist. -type SharedConfigFileNotExistError struct { - Filename string - Profile string - Err error -} - -// Cause is the underlying error that caused the failure. -func (e SharedConfigFileNotExistError) Cause() error { - return e.Err -} - -func (e SharedConfigFileNotExistError) Error() string { - return fmt.Sprintf("failed to open shared config file, %s, %v", e.Filename, e.Err) -} - -// SharedConfigProfileNotExistError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistError struct { - Filename string - Profile string - Err error -} - -// Cause is the underlying error that caused the failure. -func (e SharedConfigProfileNotExistError) Cause() error { - return e.Err -} - -func (e SharedConfigProfileNotExistError) Error() string { - return fmt.Sprintf("failed to get shared config profile, %s, in %s, %v", e.Profile, e.Filename, e.Err) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. 
-type SharedConfigAssumeRoleError struct { - Profile string - RoleARN string - Err error -} - -func (e SharedConfigAssumeRoleError) Error() string { - return fmt.Sprintf("failed to load assume role %s, of profile %s, %v", - e.RoleARN, e.Profile, e.Err) -} - -func userHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") - } - - // *nix - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/handlers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/handlers.go deleted file mode 100644 index be2b4535..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/handlers.go +++ /dev/null @@ -1,256 +0,0 @@ -package aws - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList - Complete HandlerList -} - -// Copy returns of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - Complete: h.Complete.copy(), - } -} - -// Clear removes callback functions for all handlers -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() - h.Complete.Clear() -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - if len(l.list) == 0 { - return n - } - - n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = l.list[0:0] -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. 
-func (l *HandlerList) PushBack(f func(*Request)) { - l.PushBackNamed(NamedHandler{"__anonymous", f}) -} - -// PushBackNamed pushes named handler f to the back of the handler list. -func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. - l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. -func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. -func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. 
-func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/http_request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/http_request.go deleted file mode 100644 index 40dfe40d..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package aws - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/jsonvalue.go deleted file mode 100644 index 91a6f277..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/jsonvalue.go +++ /dev/null @@ -1,12 +0,0 @@ -package aws - -// JSONValue is a representation of a grab bag type that will be marshaled -// into a json string. This type can be used just like any other map. -// -// Example: -// -// values := aws.JSONValue{ -// "Foo": "Bar", -// } -// values["Baz"] = "Qux" -type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logger.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logger.go deleted file mode 100644 index 189e5201..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logger.go +++ /dev/null @@ -1,97 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevel defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevel uint - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nil, will default to LogOff comparison. -func (l LogLevel) Matches(v LogLevel) bool { - return l&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default -// to LogOff comparison. -func (l LogLevel) AtLeast(v LogLevel) bool { - return l >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. 
- LogOff LogLevel = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevel = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -// -// TODO remove, moved to default pkg -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/offset_reader.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/offset_reader.go deleted file mode 100644 index cb4614cd..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/offset_reader.go +++ /dev/null @@ -1,58 +0,0 @@ -package aws - -import ( - "io" - "sync" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { - reader := &offsetReader{} - buf.Seek(offset, 0) - - reader.buf = buf - return reader -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. 
-func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go deleted file mode 100644 index b180eb3b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go +++ /dev/null @@ -1,587 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. - ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. - ErrCodeResponseTimeout = "ResponseTimeout" - - // ErrCodeRequestCanceled is the error code that will be returned by an - // API request that was canceled. Requests given a Context may - // return this error when canceled. - ErrCodeRequestCanceled = "RequestCanceled" -) - -// A Request is the service request to be made. -type Request struct { - Config Config - Metadata Metadata - Handlers Handlers - - Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - context Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API -// operation and parameters. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg Config, metadata Metadata, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - // TODO improve this experiance for config copy? 
- cfg = cfg.Copy() - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - // TODO need better way of handling this error... NewRequest should return error. - endpoint, err := cfg.EndpointResolver.ResolveEndpoint(metadata.ServiceName, cfg.Region) - if err == nil { - // TODO so ugly - metadata.Endpoint = endpoint.URL - if len(endpoint.SigningName) > 0 && !endpoint.SigningNameDerived { - metadata.SigningName = endpoint.SigningName - } - if len(endpoint.SigningRegion) > 0 { - metadata.SigningRegion = endpoint.SigningRegion - } - - httpReq.URL, err = url.Parse(endpoint.URL + operation.HTTPPath) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - } - - r := &Request{ - Config: cfg, - Metadata: metadata, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// A Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. -// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. -// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(LogDebugWithHTTPBody) -func WithLogLevel(l LogLevel) Option { - return func(r *Request) { - r.Config.LogLevel = l - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// the were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context will always returns a non-nil context. If Request does not have a -// context BackgroundContext will be returned. -func (r *Request) Context() Context { - if r.context != nil { - return r.context - } - return BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// a in-flight request. The Context value must not be nil, or this method will -// panic. 
-// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use use a single Request value for multiple -// requests. A new Request should be created for each API operation request. -// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - return r.Error != nil && BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - r.ResetBody() -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime - r.NotHoist = false - - if r.Operation.BeforePresignFn != nil { - r = r.copy() - err := r.Operation.BeforePresignFn(r) - if err != nil { - return "", err - } - } - - r.Sign() - if r.Error != nil { - return "", r.Error - } - return r.HTTPRequest.URL.String(), nil -} - -// PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. -// -// Returns the URL string for the API operation with signature in the query string, -// and the HTTP headers that were included in the signature. These headers must -// be included in any HTTP request made with the presigned URL. -// -// To prevent hoisting any headers to the query string set NotHoist to true on -// this Request value prior to calling PresignRequest. -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -func debugLogReqError(r *Request, stage string, retrying bool, err error) { - if !r.Config.LogLevel.Matches(LogDebugWithRequestErrors) { - return - } - - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.Metadata.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. 
-// Anny additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request returning error if errors are encountered. -// -// Send will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody = newOffsetReader(r.Body, r.BodyStart) - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := computeBodyLength(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) - } - - var body io.ReadCloser - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if a ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// Attempts to compute the length of the body of the reader using the -// io.Seeker interface. If the value is not seekable because of being -// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. -// If no error occurs the length of the body will be returned. -func computeBodyLength(r io.ReadSeeker) (int64, error) { - seekable := true - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. 
- switch v := r.(type) { - case ReaderSeekerCloser: - seekable = v.IsSeeker() - case *ReaderSeekerCloser: - seekable = v.IsSeeker() - } - if !seekable { - return -1, nil - } - - curOffset, err := r.Seek(0, 1) - if err != nil { - return 0, err - } - - endOffset, err := r.Seek(0, 2) - if err != nil { - return 0, err - } - - _, err = r.Seek(curOffset, 0) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper. -func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Regardless of success or failure of the request trigger the Complete - // request handlers. - r.Handlers.Complete.Run(r) - }() - - for { - if BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.Metadata.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - } - - r.Sign() - if r.Error != nil { - return r.Error - } - - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if !shouldRetryCancel(r) { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, err) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - err := r.Error - - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, err) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, err) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue - } - - break - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. 
-func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -func shouldRetryCancel(r *Request) bool { - awsErr, ok := r.Error.(awserr.Error) - timeoutErr := false - errStr := r.Error.Error() - if ok { - if awsErr.Code() == ErrCodeRequestCanceled { - return false - } - err := awsErr.OrigErr() - netErr, netOK := err.(net.Error) - timeoutErr = netOK && netErr.Temporary() - if urlErr, ok := err.(*url.Error); !timeoutErr && ok { - errStr = urlErr.Err.Error() - } - } - - // There can be two types of canceled errors here. - // The first being a net.Error and the other being an error. - // If the request was timed out, we want to continue the retry - // process. Otherwise, return the canceled error. - return timeoutErr || - (errStr != "net/http: request canceled" && - errStr != "net/http: request canceled while waiting for connection") - -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_7.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_7.go deleted file mode 100644 index 6db88c86..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_7.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.8 - -package aws - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// and body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_8.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_8.go deleted file mode 100644 index ad81c3d8..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_8.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.8 - -package aws - -import ( - "net/http" -) - -// NoBody is a http.NoBody reader instructing Go HTTP client to not include -// and body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. 
-// -// Will also set the Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context.go deleted file mode 100644 index 19fa4fbb..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.7 - -package aws - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context_1_6.go deleted file mode 100644 index 0b333d25..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context_1_6.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !go1.7 - -package aws - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_pagination.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_pagination.go deleted file mode 100644 index 15594c90..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_pagination.go +++ /dev/null @@ -1,187 +0,0 @@ -package aws - -import ( - "sync/atomic" - - "github.com/aws/aws-sdk-go-v2/internal/awsutil" -) - -// A Pager provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagier differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// for p.Next() { -// data := p.CurrentPage().(*s3.ListObjectsOutput) -// // process the page's data -// } -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pager type. -type Pager struct { - // Function to return a Request value for each pagination request. - // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. - NewRequest func() (*Request, error) - - started bool - nextTokens []interface{} - - err error - curPage interface{} -} - -// hasNextPage will return true if Pager is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. 
-func (p *Pager) hasNextPage() bool { - return !(p.started && len(p.nextTokens) == 0) -} - -// Err returns the error Pager encountered when retrieving the next page. -func (p *Pager) Err() error { - return p.err -} - -// CurrentPage returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pager) CurrentPage() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. -func (p *Pager) Next() bool { - if !p.hasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pager type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. -func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !BoolValue(v) { - return nil - } - case bool: - if v == false { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - vs, _ := awsutil.ValuesAtPath(r.Data, outToken) - - if len(vs) == 0 { - tokens = append(tokens, nil) - continue - } - v := vs[0] - - switch tv := v.(type) { - case *string: - if len(StringValue(tv)) == 0 { - tokens = append(tokens, nil) - continue - } - case string: - if len(tv) == 0 { - tokens = append(tokens, nil) - continue - } - } - - tokenAdded = true - tokens = append(tokens, v) - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. 
-func logDeprecatedf(logger Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/response.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/response.go deleted file mode 100644 index ea7717bf..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/response.go +++ /dev/null @@ -1,8 +0,0 @@ -package aws - -// Response provides the response meta data for a SDK API request's response. -type Response struct { - // TODO these fields should be focused on response, not just embedded request value. - // Need refactor of request for this to be better. - Request *Request -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go deleted file mode 100644 index a71b9699..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go +++ /dev/null @@ -1,160 +0,0 @@ -package aws - -import ( - "time" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. -type Retryer interface { - RetryRules(*Request) time.Duration - ShouldRetry(*Request) bool - MaxRetries() int -} - -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. -func WithRetryer(cfg *Config, retryer Retryer) *Config { - cfg.Retryer = retryer - return cfg -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. -var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. 
-var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: {}, - ErrCodeRead: {}, -} - -type temporaryError interface { - Temporary() bool -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporaryError); ok { - return t.Temporary() || isErrConnectionReset(err) - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. -func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) - } - } - return false -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - return IsErrorThrottle(r.Error) -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. 
-// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/header_rules.go deleted file mode 100644 index 244c86da..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// whitelist is a generic rule for whitelisting -type whitelist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// blacklist is a generic rule for blacklisting -type blacklist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/options.go deleted file mode 100644 index 6aa2ed24..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/options.go +++ /dev/null @@ -1,7 +0,0 @@ -package v4 - -// WithUnsignedPayload will enable and set the UnsignedPayload field to -// true of the signer. 
-func WithUnsignedPayload(v4 *Signer) { - v4.UnsignedPayload = true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/uri_path.go deleted file mode 100644 index bd082e9d..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/uri_path.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build go1.5 - -package v4 - -import ( - "net/url" - "strings" -) - -func getURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = u.EscapedPath() - } - - if len(uri) == 0 { - uri = "/" - } - - return uri -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go deleted file mode 100644 index ddbade68..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ /dev/null @@ -1,755 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. -// -// Standalone Signer -// -// Generally using the signer outside of the SDK should not require any additional -// logic when using Go v1.5 or higher. The signer does this by taking advantage -// of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. -// -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. If you're using Go v1.4 you must set -// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with -// Go v1.5 the signer will fallback to URL.Path. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath -// -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. -// -// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 -// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the -// request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP -// message. URL.Opaque generally will force Go to make requests with absolute URL. -// URL.RawPath does not do this, but RawPath must be a valid escaping of Path -// or url.EscapedPath will ignore the RawPath escaping. 
-// -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/private/protocol/rest" -) - -const ( - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. -var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials aws.CredentialsProvider - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. See - // aws.LogLevel for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevel - - // The logger loging information will be written to. 
If there the logger - // is nil, nothing will be logged. - Logger aws.Logger - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // Disales the automatical setting of the HTTP request's Body field with the - // io.ReadSeeker passed in to the signer. This is useful if you're using a - // custom wrapper around the body for the io.ReadSeeker and want to preserve - // the Body value on the Request.Body. - // - // This does run the risk of signing a request with a body that will not be - // sent in the request. Need to ensure that the underlying data of the Body - // values are the same. - DisableRequestBodyOverwrite bool - - // UnsignedPayload will prevent signing of the payload. This will only - // work for services that have support for this. - UnsignedPayload bool -} - -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credsProvider aws.CredentialsProvider, options ...func(*Signer)) *Signer { - v4 := &Signer{ - Credentials: credsProvider, - } - - for _, option := range options { - option(v4) - } - - return v4 -} - -type signingCtx struct { - ServiceName string - Region string - Request *http.Request - Body io.ReadSeeker - Query url.Values - Time time.Time - ExpireTime time.Duration - SignedHeaderVals http.Header - - DisableURIPathEscaping bool - - credValues aws.Credentials - isPresign bool - formattedTime string - formattedShortTime string - unsignedPayload bool - - bodyDigest string - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign signs AWS v4 requests with the provided body, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. Generally for signed requests this value -// is not needed as the full request context will be captured by the http.Request -// value. It is included for reference though. -// -// Sign will set the request's Body to be the `body` parameter passed in. If -// the body is not already an io.ReadCloser, it will be wrapped within one. If -// a `nil` body parameter passed to Sign, the request's Body field will be -// also set to nil. Its important to note that this functionality will not -// change the request's ContentLength of the request. -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. 
This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, signTime) -} - -// Presign signs AWS v4 requests with the provided body, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. For presigned requests these headers -// and their values must be included on the HTTP request when it is made. This -// is helpful to know what header values need to be shared with the party the -// presigned request will be distributed to. -// -// Presign differs from Sign in that it will sign the request using query string -// instead of header values. This allows you to share the Presigned Request's -// URL with third parties, or distribute it throughout your system with minimal -// dependencies. -// -// Presign also takes an exp value which is the duration the -// signed request will be valid after the signing time. This is allows you to -// set when the request will expire. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -// -// Presigning a S3 request will not compute the body's SHA256 hash by default. -// This is done due to the general use case for S3 presigned URLs is to share -// PUT/GET capabilities. If you would like to include the body's SHA256 in the -// presigned request's signature you can set the "X-Amz-Content-Sha256" -// HTTP header and that will be included in the request's signature. -func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, signTime) -} - -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: exp != 0, - ServiceName: service, - Region: region, - DisableURIPathEscaping: v4.DisableURIPathEscaping, - unsignedPayload: v4.UnsignedPayload, - } - - for key := range ctx.Query { - sort.Strings(ctx.Query[key]) - } - - if ctx.isRequestSigned() { - ctx.Time = sdk.NowTime() - ctx.handlePresignRemoval() - } - - var err error - ctx.credValues, err = v4.Credentials.Retrieve() - if err != nil { - return http.Header{}, err - } - - ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) - - // If the request is not presigned the body should be attached to it. 
This - // prevents the confusion of wanting to send a signed request without - // the body the request was signed for attached. - if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { - var reader io.ReadCloser - if body != nil { - var ok bool - if reader, ok = body.(io.ReadCloser); !ok { - reader = ioutil.NopCloser(body) - } - } - r.Body = reader - } - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo(ctx) - } - - return ctx.SignedHeaderVals, nil -} - -func (ctx *signingCtx) handlePresignRemoval() { - if !ctx.isPresign { - return - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - ctx.removePresign() - - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - ctx.Request.URL.RawQuery = ctx.Query.Encode() -} - -func (ctx *signingCtx) assignAmzQueryValues() { - if ctx.isPresign { - ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if ctx.credValues.SessionToken != "" { - ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } else { - ctx.Query.Del("X-Amz-Security-Token") - } - - return - } - - if ctx.credValues.SessionToken != "" { - ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } -} - -// SignRequestHandler is a named request handler the SDK will use to sign -// service client request with using the V4 signature. -var SignRequestHandler = aws.NamedHandler{ - Name: "v4.SignRequestHandler", Fn: func(r *aws.Request) { SignSDKRequest(r) }, -} - -// BuildNamedHandler will build a generic handler for signing. -func BuildNamedHandler(name string, opts ...func(*Signer)) aws.NamedHandler { - return aws.NamedHandler{ - Name: name, - Fn: func(req *aws.Request) { - SignSDKRequest(req, opts...) - }, - } -} - -// SignSDKRequest signs an AWS request with the V4 signature. This -// request handler should only be used with the SDK's built in service client's -// API operation requests. -// -// This function should not be used on its on its own, but in conjunction with -// an AWS service client's API operation call. To sign a standalone request -// not created by a service client's API operation method use the "Sign" or -// "Presign" functions of the "Signer" type. -// -// If the credentials of the request's config are set to -// aws.AnonymousCredentials the request will not be signed. -func SignSDKRequest(req *aws.Request, opts ...func(*Signer)) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == aws.AnonymousCredentials { - return - } - - region := req.Metadata.SigningRegion - if region == "" { - region = req.Config.Region - } - - name := req.Metadata.SigningName - if name == "" { - name = req.Metadata.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - if name == "s3" { - // S3 service should not have any escaping applied - v4.DisableURIPathEscaping = true - } - // Prevents setting the HTTPRequest's Body. Since the Body could be - // wrapped in a custom io.Closer that we do not want to be stompped - // on top of by the signer. 
- v4.DisableRequestBodyOverwrite = true - }) - - for _, opt := range opts { - opt(v4) - } - - signingTime := req.Time - if !req.LastSignedAt.IsZero() { - signingTime = req.LastSignedAt - } - - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, signingTime, - ) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = sdk.NowTime() -} - -const logSignInfoMsg = `DEBUG: Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - ctx.buildBodyDigest() - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - "Signature=" + ctx.signature, - } - ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (ctx *signingCtx) buildTime() { - ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) - ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) - - if ctx.isPresign { - duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", ctx.formattedTime) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = strings.Join([]string{ - ctx.formattedShortTime, - ctx.Region, - ctx.ServiceName, - "aws4_request", - }, "/") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := 
ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host - } else { - headerValues[i] = "host:" + ctx.Request.URL.Host - } - } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") - } - } - stripExcessSpaces(headerValues) - ctx.canonicalHeaders = strings.Join(headerValues, "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - - uri := getURIPath(ctx.Request.URL) - - if !ctx.DisableURIPathEscaping { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - ctx.formattedTime, - ctx.credentialString, - hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - secret := ctx.credValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) - region := makeHmac(date, []byte(ctx.Region)) - service := makeHmac(region, []byte(ctx.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - includeSHA256Header := ctx.unsignedPayload || - ctx.ServiceName == "s3" || - ctx.ServiceName == "glacier" - - s3Presign := ctx.isPresign && ctx.ServiceName == "s3" - - if ctx.unsignedPayload || s3Presign { - hash = "UNSIGNED-PAYLOAD" - includeSHA256Header = !s3Presign - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) - } - - if includeSHA256Header { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
-func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} - -const doubleSpace = " " - -// stripExcessSpaces will rewrite the passed in slice's string values to not -// contain muliple side-by-side spaces. -func stripExcessSpaces(vals []string) { - var j, k, l, m, spaces int - for i, str := range vals { - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - vals[i] = str - continue - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - vals[i] = string(buf[:m]) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/static_provider.go deleted file mode 100644 index 0870016a..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/static_provider.go +++ /dev/null @@ -1,52 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -// StaticCredentialsProviderName provides a name of Static provider -const StaticCredentialsProviderName = "StaticCredentialsProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticCredentialsProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticCredentialsProvider struct { - Value Credentials -} - -// NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS credentials -// passed in. -func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider { - return StaticCredentialsProvider{ - Value: Credentials{ - AccessKeyID: key, - SecretAccessKey: secret, - SessionToken: session, - }, - } -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s StaticCredentialsProvider) Retrieve() (Credentials, error) { - v := s.Value - if v.AccessKeyID == "" || v.SecretAccessKey == "" { - return Credentials{Source: StaticCredentialsProviderName}, ErrStaticCredentialsEmpty - } - - if len(v.Source) == 0 { - v.Source = StaticCredentialsProviderName - } - - return v, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticCredentialsProvider, the credentials never expired. 
-func (s StaticCredentialsProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/stscreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/stscreds/provider.go deleted file mode 100644 index 549a2c1a..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/stscreds/provider.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Package stscreds are credential Providers to retrieve STS AWS credentials. - -STS provides multiple ways to retrieve credentials which can be used when making -future AWS service API operation calls. - -The SDK will ensure that per instance of credentials.Credentials all requests -to refresh the credentials will be synchronized. But, the SDK is unable to -ensure synchronous usage of the AssumeRoleProvider if the value is shared -between multiple Credentials or service clients. - -Assume Role - -To assume an IAM role using STS with the SDK you can create a new Credentials -with the SDKs's stscreds package. - - // Initial credentials loaded from SDK's default credential chain. Such as - // the environment, shared credentials (~/.aws/credentials), or EC2 Instance - // Role. These credentials will be used to to make the STS Assume Role API. - cfg, err := external.LoadDefaultAWSConfig() - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. - stsSvc := sts.New(cfg) - stsCredProvider := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn") - - cfg.Credentials = aws.NewCredentials(stsCredProvider) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(cfg) - -Assume Role with static MFA Token - -To assume an IAM role with a MFA token you can either specify a MFA token code -directly or provide a function to prompt the user each time the credentials -need to refresh the role's credentials. Specifying the TokenCode should be used -for short lived operations that will not need to be refreshed, and when you do -not want to have direct control over the user provides their MFA token. - -With TokenCode the AssumeRoleProvider will be not be able to refresh the role's -credentials. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN using the MFA token code provided. - creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenCode = aws.String("00000000") - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with MFA Token Provider - -To assume an IAM role with MFA for longer running tasks where the credentials -may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -will allow the credential provider to prompt for new MFA token code when the -role's credentials need to be refreshed. - -The StdinTokenProvider function is available to prompt on stdin to retrieve -the MFA token code from the user. You can also implement custom prompts by -satisfing the TokenProvider function signature. - -Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -have undesirable results as the StdinTokenProvider will not be synchronized. A -single Credentials with an AssumeRoleProvider can be shared safely. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. 
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenProvider = stscreds.StdinTokenProvider - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -*/ -package stscreds - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/service/sts" -) - -// StdinTokenProvider will prompt on stdout and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Printf("Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoler represents the minimal subset of the STS client API used by this provider. -type AssumeRoler interface { - AssumeRoleRequest(input *sts.AssumeRoleInput) sts.AssumeRoleRequest -} - -// DefaultDuration is the default amount of time in minutes that the credentials -// will be valid for. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. -// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. -// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - aws.SafeCredentialsProvider - - // STS client to make assume role request with. - Client AssumeRoler - - // Role to be assumed. - RoleARN string - - // Session name, if you wish to reuse the credentials elsewhere. - RoleSessionName string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. 
- // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - SerialNumber *string - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // If SerialNumber is set and neither TokenCode nor TokenProvider are also - // set an error will be returned. - TokenCode *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is also set and - // TokenCode is not set. - // - // If both TokenCode and TokenProvider is set, TokenProvider will be used and - // TokenCode is ignored. - TokenProvider func() (string, error) - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewAssumeRoleProvider constructs and returns a credentials provider that -// will retrieve credentials by assuming a IAM role using STS. -func NewAssumeRoleProvider(client AssumeRoler, roleARN string) *AssumeRoleProvider { - p := &AssumeRoleProvider{ - Client: client, - RoleARN: roleARN, - } - p.RetrieveFn = p.retrieveFn - - return p -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) retrieveFn() (aws.Credentials, error) { - // Apply defaults where parameters are not set. - if len(p.RoleSessionName) == 0 { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. 
- p.Duration = DefaultDuration - } - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, - } - if p.Policy != nil { - input.Policy = p.Policy - } - if p.SerialNumber != nil { - if p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode - } else if p.TokenProvider != nil { - input.SerialNumber = p.SerialNumber - code, err := p.TokenProvider() - if err != nil { - return aws.Credentials{}, err - } - input.TokenCode = aws.String(code) - } else { - return aws.Credentials{}, - awserr.New("AssumeRoleTokenNotAvailable", - "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) - } - } - - req := p.Client.AssumeRoleRequest(input) - resp, err := req.Send() - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - return aws.Credentials{ - AccessKeyID: *resp.Credentials.AccessKeyId, - SecretAccessKey: *resp.Credentials.SecretAccessKey, - SessionToken: *resp.Credentials.SessionToken, - Source: ProviderName, - - CanExpire: true, - Expires: resp.Credentials.Expiration.Add(-p.ExpiryWindow), - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/timeout_read_closer.go deleted file mode 100644 index d8b5124e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package aws - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. -type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// to a ErrCodeResponseTimeout, if its child is that. -func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurred, we will return the -// ErrCodeResponseTimeout. 
-// -// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations. - r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go deleted file mode 100644 index 0e2d864e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go +++ /dev/null @@ -1,118 +0,0 @@ -package aws - -import ( - "io" - "sync" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. -// -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. 
- GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/url.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/url.go deleted file mode 100644 index 6192b245..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/url.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. -func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/url_1_7.go deleted file mode 100644 index 0210d272..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/url_1_7.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. -// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/validation.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/validation.go deleted file mode 100644 index 1f11aa28..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/validation.go +++ /dev/null @@ -1,286 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" - // ParamMaxLenErrCode is the error code for value being too long. - ParamMaxLenErrCode = "ParamMaxLenError" - - // ParamFormatErrCode is the error code for a field with invalid - // format or characters. 
- ParamFormatErrCode = "ParamFormatInvalidError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. -func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. 
-func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." - } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} - -// An ErrParamMaxLen represents a maximum length parameter error. -type ErrParamMaxLen struct { - errInvalidParam - max int -} - -// NewErrParamMaxLen creates a new maximum length parameter error. -func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { - return &ErrParamMaxLen{ - errInvalidParam: errInvalidParam{ - code: ParamMaxLenErrCode, - field: field, - msg: fmt.Sprintf("maximum size of %v, %v", max, value), - }, - max: max, - } -} - -// MaxLen returns the field's required minimum length. -func (e *ErrParamMaxLen) MaxLen() int { - return e.max -} - -// An ErrParamFormat represents a invalid format parameter error. -type ErrParamFormat struct { - errInvalidParam - format string -} - -// NewErrParamFormat creates a new invalid format parameter error. -func NewErrParamFormat(field string, format, value string) *ErrParamFormat { - return &ErrParamFormat{ - errInvalidParam: errInvalidParam{ - code: ParamFormatErrCode, - field: field, - msg: fmt.Sprintf("format %v, %v", format, value), - }, - format: format, - } -} - -// Format returns the field's required format. 
-func (e *ErrParamFormat) Format() string { - return e.format -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go deleted file mode 100644 index abd858ca..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "0.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/waiter.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/waiter.go deleted file mode 100644 index 6e3f7e7b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/waiter.go +++ /dev/null @@ -1,284 +0,0 @@ -package aws - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/internal/awsutil" - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns the maximum number of times the waiter should -// attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -// -// Attempt is the number of attempts the Waiter has made checking the resource -// state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. -func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. -func WithWaiterLogger(logger Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. 
-type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) - SleepWithContext func(Context, time.Duration) error -} - -// ApplyOptions updates the waiter with the list of waiter options provided. -func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. -func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. -// Use BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors, -// or the max attempts expires. -// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts expires. 
-func (w Waiter) WaitWithContext(ctx Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times - // This is here instead of in the for loop above to prevent delaying - // unnecessary when the waiter will not retry. - if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - if err := sdk.SleepWithContext(ctx, w.Delay(attempt)); err != nil { - return awserr.New(ErrCodeRequestCanceled, "waiter context canceled", err) - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. 
- return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go deleted file mode 100644 index 938cd14c..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go +++ /dev/null @@ -1,112 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. 
-func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - if dst.Kind() == reflect.String { - dst.SetString(e.String()) - } else { - dst.Set(reflect.New(e)) - } - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if dst.Kind() != reflect.String && src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go deleted file mode 100644 index bcfe51a2..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go +++ /dev/null @@ -1,33 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - // Special casing for strings as typed enumerations are string aliases - // but are not deep equal. 
- if ra.Kind() == reflect.String && rb.Kind() == reflect.String { - return ra.String() == rb.String() - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go deleted file mode 100644 index 7e69bd5e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go +++ /dev/null @@ -1,225 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. -func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. 
- value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. -func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - if dstVal.Kind() == reflect.String { - dstVal.SetString(srcVal.String()) - } else { - dstVal.Set(srcVal) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go deleted file mode 100644 index 710eb432..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go +++ /dev/null @@ -1,113 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. 
-func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. -func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go deleted file mode 100644 index b6432f1a..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go +++ /dev/null @@ -1,89 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. 
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go deleted file mode 100644 index e83a9988..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go +++ /dev/null @@ -1,120 +0,0 @@ -package ini - -// ASTKind represents different states in the parse table -// and the type of AST that is being constructed -type ASTKind int - -// ASTKind* is used in the parse table to transition between -// the different states -const ( - ASTKindNone = ASTKind(iota) - ASTKindStart - ASTKindExpr - ASTKindEqualExpr - ASTKindStatement - ASTKindSkipStatement - ASTKindExprStatement - ASTKindSectionStatement - ASTKindNestedSectionStatement - ASTKindCompletedNestedSectionStatement - ASTKindCommentStatement - ASTKindCompletedSectionStatement -) - -func (k ASTKind) String() string { - switch k { - case ASTKindNone: - return "none" - case ASTKindStart: - return "start" - case ASTKindExpr: - return "expr" - case ASTKindStatement: - return "stmt" - case ASTKindSectionStatement: - return "section_stmt" - case ASTKindExprStatement: - return "expr_stmt" - case ASTKindCommentStatement: - return "comment" - case ASTKindNestedSectionStatement: - return "nested_section_stmt" - case ASTKindCompletedSectionStatement: - return "completed_stmt" - case ASTKindSkipStatement: - return "skip" - default: - return "" - } -} - -// AST interface allows us to determine what kind of node we -// are on and casting may not need to be necessary. 
-// -// The root is always the first node in Children -type AST struct { - Kind ASTKind - Root Token - RootToken bool - Children []AST -} - -func newAST(kind ASTKind, root AST, children ...AST) AST { - return AST{ - Kind: kind, - Children: append([]AST{root}, children...), - } -} - -func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { - return AST{ - Kind: kind, - Root: root, - RootToken: true, - Children: children, - } -} - -// AppendChild will append to the list of children an AST has. -func (a *AST) AppendChild(child AST) { - a.Children = append(a.Children, child) -} - -// GetRoot will return the root AST which can be the first entry -// in the children list or a token. -func (a *AST) GetRoot() AST { - if a.RootToken { - return *a - } - - if len(a.Children) == 0 { - return AST{} - } - - return a.Children[0] -} - -// GetChildren will return the current AST's list of children -func (a *AST) GetChildren() []AST { - if len(a.Children) == 0 { - return []AST{} - } - - if a.RootToken { - return a.Children - } - - return a.Children[1:] -} - -// SetChildren will set and override all children of the AST. -func (a *AST) SetChildren(children []AST) { - if a.RootToken { - a.Children = children - } else { - a.Children = append(a.Children[:1], children...) - } -} - -// Start is used to indicate the starting state of the parse table. -var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go deleted file mode 100644 index 0895d53c..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go +++ /dev/null @@ -1,11 +0,0 @@ -package ini - -var commaRunes = []rune(",") - -func isComma(b rune) bool { - return b == ',' -} - -func newCommaToken() Token { - return newToken(TokenComma, commaRunes, NoneType) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go deleted file mode 100644 index 0b76999b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// isComment will return whether or not the next byte(s) is a -// comment. -func isComment(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case ';': - return true - case '#': - return true - } - - return false -} - -// newCommentToken will create a comment token and -// return how many bytes were read. -func newCommentToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if b[i] == '\n' { - break - } - - if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { - break - } - } - - return newToken(TokenComment, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go deleted file mode 100644 index 25ce0fe1..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package ini is an LL(1) parser for configuration files. 
-// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -// -// Below is the BNF that describes this parser -// Grammar: -// stmt -> value stmt' -// stmt' -> epsilon | op stmt -// value -> number | string | boolean | quoted_string -// -// section -> [ section' -// section' -> value section_close -// section_close -> ] -// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value -package ini diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go deleted file mode 100644 index 04345a54..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go +++ /dev/null @@ -1,4 +0,0 @@ -package ini - -// emptyToken is used to satisfy the Token interface -var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go deleted file mode 100644 index 91ba2a59..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -// newExpression will return an expression AST. -// Expr represents an expression -// -// grammar: -// expr -> string | number -func newExpression(tok Token) AST { - return newASTWithRootToken(ASTKindExpr, tok) -} - -func newEqualExpr(left AST, tok Token) AST { - return newASTWithRootToken(ASTKindEqualExpr, tok, left) -} - -// EqualExprKey will return a LHS value in the equal expr -func EqualExprKey(ast AST) string { - children := ast.GetChildren() - if len(children) == 0 || ast.Kind != ASTKindEqualExpr { - return "" - } - - return string(children[0].Root.Raw()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go deleted file mode 100644 index 8d462f77..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build gofuzz - -package ini - -import ( - "bytes" -) - -func Fuzz(data []byte) int { - b := bytes.NewReader(data) - - if _, err := Parse(b); err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go deleted file mode 100644 index af6f397d..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go +++ /dev/null @@ -1,51 +0,0 @@ -package ini - -import ( - "io" - "os" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -// OpenFile takes a path to a given file, and will open and parse -// that file. -func OpenFile(path string) (Sections, error) { - f, err := os.Open(path) - if err != nil { - return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) - } - defer f.Close() - - return Parse(f) -} - -// Parse will parse the given file using the shared config -// visitor. -func Parse(f io.Reader) (Sections, error) { - tree, err := ParseAST(f) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} - -// ParseBytes will parse the given bytes and return the parsed sections. 
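Given the Parse and GetSection signatures shown in this package, a usage sketch could look like the following. Note the package sits under internal/, so only code inside the aws-sdk-go-v2 module can actually import it; the profile name and key here are illustrative:

```go
// Illustrative only: this package lives under internal/, so it is importable
// only from inside the aws-sdk-go-v2 module itself.
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	cfg := "[default]\nregion = us-west-2\n"

	sections, err := ini.Parse(strings.NewReader(cfg))
	if err != nil {
		panic(err)
	}

	if section, ok := sections.GetSection("default"); ok {
		fmt.Println(section.String("region")) // us-west-2
	}
}
```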
-func ParseBytes(b []byte) (Sections, error) { - tree, err := ParseASTBytes(b) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go deleted file mode 100644 index 898ebb05..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go +++ /dev/null @@ -1,165 +0,0 @@ -package ini - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -const ( - // ErrCodeUnableToReadFile is used when a file is failed to be - // opened or read from. - ErrCodeUnableToReadFile = "FailedRead" -) - -// TokenType represents the various different tokens types -type TokenType int - -func (t TokenType) String() string { - switch t { - case TokenNone: - return "none" - case TokenLit: - return "literal" - case TokenSep: - return "sep" - case TokenOp: - return "op" - case TokenWS: - return "ws" - case TokenNL: - return "newline" - case TokenComment: - return "comment" - case TokenComma: - return "comma" - default: - return "" - } -} - -// TokenType enums -const ( - TokenNone = TokenType(iota) - TokenLit - TokenSep - TokenComma - TokenOp - TokenWS - TokenNL - TokenComment -) - -type iniLexer struct{} - -// Tokenize will return a list of tokens during lexical analysis of the -// io.Reader. -func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) - } - - return l.tokenize(b) -} - -func (l *iniLexer) tokenize(b []byte) ([]Token, error) { - runes := bytes.Runes(b) - var err error - n := 0 - tokenAmount := countTokens(runes) - tokens := make([]Token, tokenAmount) - count := 0 - - for len(runes) > 0 && count < tokenAmount { - switch { - case isWhitespace(runes[0]): - tokens[count], n, err = newWSToken(runes) - case isComma(runes[0]): - tokens[count], n = newCommaToken(), 1 - case isComment(runes): - tokens[count], n, err = newCommentToken(runes) - case isNewline(runes): - tokens[count], n, err = newNewlineToken(runes) - case isSep(runes): - tokens[count], n, err = newSepToken(runes) - case isOp(runes): - tokens[count], n, err = newOpToken(runes) - default: - tokens[count], n, err = newLitToken(runes) - } - - if err != nil { - return nil, err - } - - count++ - - runes = runes[n:] - } - - return tokens[:count], nil -} - -func countTokens(runes []rune) int { - count, n := 0, 0 - var err error - - for len(runes) > 0 { - switch { - case isWhitespace(runes[0]): - _, n, err = newWSToken(runes) - case isComma(runes[0]): - _, n = newCommaToken(), 1 - case isComment(runes): - _, n, err = newCommentToken(runes) - case isNewline(runes): - _, n, err = newNewlineToken(runes) - case isSep(runes): - _, n, err = newSepToken(runes) - case isOp(runes): - _, n, err = newOpToken(runes) - default: - _, n, err = newLitToken(runes) - } - - if err != nil { - return 0 - } - - count++ - runes = runes[n:] - } - - return count + 1 -} - -// Token indicates a metadata about a given value. 
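The countTokens and tokenize functions above run the same scan twice, first to size the token slice and then to fill it. A trimmed-down sketch of that count-then-fill pattern, with whitespace splitting standing in for the real token rules:

```go
package main

import (
	"fmt"
	"unicode"
)

// fields scans the runes twice: once to count tokens so the result slice can
// be sized up front, and once to fill it, mirroring countTokens/tokenize.
func fields(runes []rune) []string {
	count := 0
	for i := 0; i < len(runes); {
		for i < len(runes) && unicode.IsSpace(runes[i]) {
			i++
		}
		if i >= len(runes) {
			break
		}
		count++
		for i < len(runes) && !unicode.IsSpace(runes[i]) {
			i++
		}
	}

	out := make([]string, 0, count)
	for i := 0; i < len(runes); {
		for i < len(runes) && unicode.IsSpace(runes[i]) {
			i++
		}
		start := i
		for i < len(runes) && !unicode.IsSpace(runes[i]) {
			i++
		}
		if i > start {
			out = append(out, string(runes[start:i]))
		}
	}
	return out
}

func main() {
	fmt.Println(fields([]rune("region = us-west-2"))) // [region = us-west-2]
}
```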
-type Token struct { - t TokenType - ValueType ValueType - base int - raw []rune -} - -var emptyValue = Value{} - -func newToken(t TokenType, raw []rune, v ValueType) Token { - return Token{ - t: t, - raw: raw, - ValueType: v, - } -} - -// Raw return the raw runes that were consumed -func (tok Token) Raw() []rune { - return tok.raw -} - -// Type returns the token type -func (tok Token) Type() TokenType { - return tok.t -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go deleted file mode 100644 index 8be520ae..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go +++ /dev/null @@ -1,347 +0,0 @@ -package ini - -import ( - "fmt" - "io" -) - -// State enums for the parse table -const ( - InvalidState = iota - // stmt -> value stmt' - StatementState - // stmt' -> MarkComplete | op stmt - StatementPrimeState - // value -> number | string | boolean | quoted_string - ValueState - // section -> [ section' - OpenScopeState - // section' -> value section_close - SectionState - // section_close -> ] - CloseScopeState - // SkipState will skip (NL WS)+ - SkipState - // SkipTokenState will skip any token and push the previous - // state onto the stack. - SkipTokenState - // comment -> # comment' | ; comment' - // comment' -> MarkComplete | value - CommentState - // MarkComplete state will complete statements and move that - // to the completed AST list - MarkCompleteState - // TerminalState signifies that the tokens have been fully parsed - TerminalState -) - -// parseTable is a state machine to dictate the grammar above. -var parseTable = map[ASTKind]map[TokenType]int{ - ASTKindStart: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, - ASTKindCommentStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExpr: map[TokenType]int{ - TokenOp: StatementPrimeState, - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenWS: ValueState, - TokenNL: SkipState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindEqualExpr: map[TokenType]int{ - TokenLit: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - }, - ASTKindStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenSep: CloseScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExprStatement: map[TokenType]int{ - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenOp: ValueState, - TokenWS: ValueState, - TokenNL: MarkCompleteState, - TokenComment: CommentState, - TokenNone: TerminalState, - TokenComma: SkipState, - }, - ASTKindSectionStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenOp: SectionState, - TokenSep: CloseScopeState, - TokenWS: SectionState, - TokenNL: SkipTokenState, - }, - ASTKindCompletedSectionStatement: map[TokenType]int{ - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindSkipStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - 
TokenNone: TerminalState, - }, -} - -// ParseAST will parse input from an io.Reader using -// an LL(1) parser. -func ParseAST(r io.Reader) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.Tokenize(r) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -// ParseASTBytes will parse input from a byte slice using -// an LL(1) parser. -func ParseASTBytes(b []byte) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.tokenize(b) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -func parse(tokens []Token) ([]AST, error) { - start := Start - stack := newParseStack(3, len(tokens)) - - stack.Push(start) - s := newSkipper() - -loop: - for stack.Len() > 0 { - k := stack.Pop() - - var tok Token - if len(tokens) == 0 { - // this occurs when all the tokens have been processed - // but reduction of what's left on the stack needs to - // occur. - tok = emptyToken - } else { - tok = tokens[0] - } - - step := parseTable[k.Kind][tok.Type()] - if s.ShouldSkip(tok) { - // being in a skip state with no tokens will break out of - // the parse loop since there is nothing left to process. - if len(tokens) == 0 { - break loop - } - - step = SkipTokenState - } - - switch step { - case TerminalState: - // Finished parsing. Push what should be the last - // statement to the stack. If there is anything left - // on the stack, an error in parsing has occurred. - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - break loop - case SkipTokenState: - // When skipping a token, the previous state was popped off the stack. - // To maintain the correct state, the previous state will be pushed - // onto the stack. - stack.Push(k) - case StatementState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - expr := newExpression(tok) - stack.Push(expr) - case StatementPrimeState: - if tok.Type() != TokenOp { - stack.MarkComplete(k) - continue - } - - if k.Kind != ASTKindExpr { - return nil, NewParseError( - fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), - ) - } - - k = trimSpaces(k) - expr := newEqualExpr(k, tok) - stack.Push(expr) - case ValueState: - // ValueState requires the previous state to either be an equal expression - // or an expression statement. - // - // This grammar occurs when the RHS is a number, word, or quoted string. - // equal_expr -> lit op equal_expr' - // equal_expr' -> number | string | quoted_string - // quoted_string -> " quoted_string' - // quoted_string' -> string quoted_string_end - // quoted_string_end -> " - // - // otherwise - // expr_stmt -> equal_expr (expr_stmt')* - // expr_stmt' -> ws S | op S | MarkComplete - // S -> equal_expr' expr_stmt' - switch k.Kind { - case ASTKindEqualExpr: - // assiging a value to some key - k.AppendChild(newExpression(tok)) - stack.Push(newExprStatement(k)) - case ASTKindExpr: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stack.Push(k) - case ASTKindExprStatement: - root := k.GetRoot() - children := root.GetChildren() - if len(children) == 0 { - return nil, NewParseError( - fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), - ) - } - - rhs := children[len(children)-1] - - if rhs.Root.ValueType != QuotedStringType { - rhs.Root.ValueType = StringType - rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
- - } - - children[len(children)-1] = rhs - k.SetChildren(children) - - stack.Push(k) - } - case OpenScopeState: - if !runeCompare(tok.Raw(), openBrace) { - return nil, NewParseError("expected '['") - } - - stmt := newStatement() - stack.Push(stmt) - case CloseScopeState: - if !runeCompare(tok.Raw(), closeBrace) { - return nil, NewParseError("expected ']'") - } - - k = trimSpaces(k) - stack.Push(newCompletedSectionStatement(k)) - case SectionState: - var stmt AST - - switch k.Kind { - case ASTKindStatement: - // If there are multiple literals inside of a scope declaration, - // then the current token's raw value will be appended to the Name. - // - // This handles cases like [ profile default ] - // - // k will represent a SectionStatement with the children representing - // the label of the section - stmt = newSectionStatement(tok) - case ASTKindSectionStatement: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stmt = k - default: - return nil, NewParseError( - fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), - ) - } - - stack.Push(stmt) - case MarkCompleteState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - if stack.Len() == 0 { - stack.Push(start) - } - case SkipState: - stack.Push(newSkipStatement(k)) - s.Skip() - case CommentState: - if k.Kind == ASTKindStart { - stack.Push(k) - } else { - stack.MarkComplete(k) - } - - stmt := newCommentStatement(tok) - stack.Push(stmt) - default: - return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) - } - - if len(tokens) > 0 { - tokens = tokens[1:] - } - } - - // this occurs when a statement has not been completed - if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) - } - - // returns a sublist which exludes the start symbol - return stack.List(), nil -} - -// trimSpaces will trim spaces on the left and right hand side of -// the literal. -func trimSpaces(k AST) AST { - // trim left hand side of spaces - for i := 0; i < len(k.Root.raw); i++ { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[1:] - i-- - } - - // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - } - - return k -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go deleted file mode 100644 index 24df543d..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go +++ /dev/null @@ -1,324 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - runesTrue = []rune("true") - runesFalse = []rune("false") -) - -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. 
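For comparison, the standard library can classify the same formats: strconv.ParseInt with base 0 honors 0b/0o/0x prefixes, and ParseFloat covers decimals and exponents. A small sketch (the sample inputs are illustrative):

```go
package main

import (
	"fmt"
	"strconv"
)

// classify reports whether s parses as an integer (any base prefix) or a
// float with an optional exponent, roughly the set isNumberValue accepts.
func classify(s string) string {
	if _, err := strconv.ParseInt(s, 0, 64); err == nil {
		return "integer"
	}
	if _, err := strconv.ParseFloat(s, 64); err == nil {
		return "float"
	}
	return "not a number"
}

func main() {
	for _, s := range []string{"42", "0x2A", "0b101010", "-1e-4", "3.14", "abc"} {
		fmt.Printf("%-8s %s\n", s, classify(s))
	}
}
```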
-func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - -func isValid(b []rune) (bool, int, error) { - if len(b) == 0 { - // TODO: should probably return an error - return false, 0, nil - } - - return isValidRune(b[0]), 1, nil -} - -func isValidRune(r rune) bool { - return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' -} - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" - case StringType: - return "STRING" - case BoolType: - return "BOOL" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - DecimalType - IntegerType - StringType - QuotedStringType - BoolType -) - -// Value is a union container -type Value struct { - Type ValueType - raw []rune - - integer int64 - decimal float64 - boolean bool - str string -} - -func newValue(t ValueType, base int, raw []rune) (Value, error) { - v := Value{ - Type: t, - raw: raw, - } - var err error - - switch t { - case DecimalType: - v.decimal, err = strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) - case StringType: - v.str = string(raw) - case QuotedStringType: - v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = runeCompare(v.raw, runesTrue) - } - - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err -} - -// Append will append values and change the type to a string -// type. -func (v *Value) Append(tok Token) { - r := tok.Raw() - if v.Type != QuotedStringType { - v.Type = StringType - r = tok.raw[1 : len(tok.raw)-1] - } - if tok.Type() != TokenLit { - v.raw = append(v.raw, tok.Raw()...) - } else { - v.raw = append(v.raw, r...) 
- } -} - -func (v Value) String() string { - switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) - case StringType: - return fmt.Sprintf("string: %s", string(v.raw)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) - default: - return "union not set" - } -} - -func newLitToken(b []rune) (Token, int, error) { - n := 0 - var err error - - token := Token{} - if b[0] == '"' { - n, err = getStringValue(b) - if err != nil { - return token, n, err - } - - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) - } else { - n, err = getValue(b) - token = newToken(TokenLit, b[:n], StringType) - } - - return token, n, err -} - -// IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer -} - -// FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal -} - -// BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean -} - -func isTrimmable(r rune) bool { - switch r { - case '\n', ' ': - return true - } - return false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - switch v.Type { - case StringType: - return strings.TrimFunc(string(v.raw), isTrimmable) - case QuotedStringType: - // preserve all characters in the quotes - return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) - default: - return strings.TrimFunc(string(v.raw), isTrimmable) - } -} - -func contains(runes []rune, c rune) bool { - for i := 0; i < len(runes); i++ { - if runes[i] == c { - return true - } - } - - return false -} - -func runeCompare(v1 []rune, v2 []rune) bool { - if len(v1) != len(v2) { - return false - } - - for i := 0; i < len(v1); i++ { - if v1[i] != v2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go deleted file mode 100644 index e52ac399..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go +++ /dev/null @@ -1,30 +0,0 @@ -package ini - -func isNewline(b []rune) bool { - if len(b) == 0 { - return false - } - - if b[0] == '\n' { - return true - } - - if len(b) < 2 { - return false - } - - return b[0] == '\r' && b[1] == '\n' -} - -func newNewlineToken(b []rune) (Token, int, error) { - i := 1 - if b[0] == '\r' && isNewline(b[1:]) { - i++ - } - - if !isNewline([]rune(b[:i])) { - return emptyToken, 0, NewParseError("invalid new line token") - } - - return newToken(TokenNL, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc5..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - 
exponent -) - -type numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. -type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go deleted file mode 100644 index 8a84c7cb..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go +++ /dev/null @@ -1,39 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - equalOp = []rune("=") - equalColonOp = []rune(":") -) - -func isOp(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '=': - return true - case ':': - return true - default: - return false - } -} - -func newOpToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '=': - tok = newToken(TokenOp, equalOp, NoneType) - case ':': - tok = newToken(TokenOp, equalColonOp, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go deleted file mode 100644 index 45728701..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go +++ /dev/null @@ -1,43 +0,0 @@ -package ini - -import "fmt" - -const ( - // ErrCodeParseError is returned when a parsing error - // has occurred. - ErrCodeParseError = "INIParseError" -) - -// ParseError is an error which is returned during any part of -// the parsing process. -type ParseError struct { - msg string -} - -// NewParseError will return a new ParseError where message -// is the description of the error. -func NewParseError(message string) *ParseError { - return &ParseError{ - msg: message, - } -} - -// Code will return the ErrCodeParseError -func (err *ParseError) Code() string { - return ErrCodeParseError -} - -// Message returns the error's message -func (err *ParseError) Message() string { - return err.msg -} - -// OrigError return nothing since there will never be any -// original error. -func (err *ParseError) OrigError() error { - return nil -} - -func (err *ParseError) Error() string { - return fmt.Sprintf("%s: %s", err.Code(), err.Message()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go deleted file mode 100644 index 7f01cf7c..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go +++ /dev/null @@ -1,60 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" -) - -// ParseStack is a stack that contains a container, the stack portion, -// and the list which is the list of ASTs that have been successfully -// parsed. -type ParseStack struct { - top int - container []AST - list []AST - index int -} - -func newParseStack(sizeContainer, sizeList int) ParseStack { - return ParseStack{ - container: make([]AST, sizeContainer), - list: make([]AST, sizeList), - } -} - -// Pop will return and truncate the last container element. 
-func (s *ParseStack) Pop() AST { - s.top-- - return s.container[s.top] -} - -// Push will add the new AST to the container -func (s *ParseStack) Push(ast AST) { - s.container[s.top] = ast - s.top++ -} - -// MarkComplete will append the AST to the list of completed statements -func (s *ParseStack) MarkComplete(ast AST) { - s.list[s.index] = ast - s.index++ -} - -// List will return the completed statements -func (s ParseStack) List() []AST { - return s.list[:s.index] -} - -// Len will return the length of the container -func (s *ParseStack) Len() int { - return s.top -} - -func (s ParseStack) String() string { - buf := bytes.Buffer{} - for i, node := range s.list { - buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go deleted file mode 100644 index f82095ba..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go +++ /dev/null @@ -1,41 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - emptyRunes = []rune{} -) - -func isSep(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '[', ']': - return true - default: - return false - } -} - -var ( - openBrace = []rune("[") - closeBrace = []rune("]") -) - -func newSepToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '[': - tok = newToken(TokenSep, openBrace, NoneType) - case ']': - tok = newToken(TokenSep, closeBrace, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go deleted file mode 100644 index 6bb69644..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go +++ /dev/null @@ -1,45 +0,0 @@ -package ini - -// skipper is used to skip certain blocks of an ini file. -// Currently skipper is used to skip nested blocks of ini -// files. See example below -// -// [ foo ] -// nested = ; this section will be skipped -// a=b -// c=d -// bar=baz ; this will be included -type skipper struct { - shouldSkip bool - TokenSet bool - prevTok Token -} - -func newSkipper() skipper { - return skipper{ - prevTok: emptyToken, - } -} - -func (s *skipper) ShouldSkip(tok Token) bool { - if s.shouldSkip && - s.prevTok.Type() == TokenNL && - tok.Type() != TokenWS { - - s.Continue() - return false - } - s.prevTok = tok - - return s.shouldSkip -} - -func (s *skipper) Skip() { - s.shouldSkip = true - s.prevTok = emptyToken -} - -func (s *skipper) Continue() { - s.shouldSkip = false - s.prevTok = emptyToken -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go deleted file mode 100644 index ba0af01b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// Statement is an empty AST mostly used for transitioning states. -func newStatement() AST { - return newAST(ASTKindStatement, AST{}) -} - -// SectionStatement represents a section AST -func newSectionStatement(tok Token) AST { - return newASTWithRootToken(ASTKindSectionStatement, tok) -} - -// ExprStatement represents a completed expression AST -func newExprStatement(ast AST) AST { - return newAST(ASTKindExprStatement, ast) -} - -// CommentStatement represents a comment in the ini defintion. 
-// -// grammar: -// comment -> #comment' | ;comment' -// comment' -> epsilon | value -func newCommentStatement(tok Token) AST { - return newAST(ASTKindCommentStatement, newExpression(tok)) -} - -// CompletedSectionStatement represents a completed section -func newCompletedSectionStatement(ast AST) AST { - return newAST(ASTKindCompletedSectionStatement, ast) -} - -// SkipStatement is used to skip whole statements -func newSkipStatement(ast AST) AST { - return newAST(ASTKindSkipStatement, ast) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go deleted file mode 100644 index 305999d2..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go +++ /dev/null @@ -1,284 +0,0 @@ -package ini - -import ( - "fmt" -) - -// getStringValue will return a quoted string and the amount -// of bytes read -// -// an error will be returned if the string is not properly formatted -func getStringValue(b []rune) (int, error) { - if b[0] != '"' { - return 0, NewParseError("strings must start with '\"'") - } - - endQuote := false - i := 1 - - for ; i < len(b) && !endQuote; i++ { - if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { - endQuote = true - break - } else if escaped { - /*c, err := getEscapedByte(b[i]) - if err != nil { - return 0, err - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i--*/ - - continue - } - } - - if !endQuote { - return 0, NewParseError("missing '\"' in string value") - } - - return i + 1, nil -} - -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid 
numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -func getValue(b []rune) (int, error) { - i := 0 - - for i < len(b) { - if isNewline(b[i:]) { - break - } - - if isOp(b[i:]) { - break - } - - valid, n, err := isValid(b[i:]) - if err != nil { - return 0, err - } - - if !valid { - break - } - - i += n - } - - return i, nil -} - -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. -func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - -// isEscaped will return whether or not the character is an escaped -// character. -func isEscaped(value []rune, b rune) bool { - if len(value) == 0 { - return false - } - - switch b { - case '\'': // single quote - case '"': // quote - case 'n': // newline - case 't': // tab - case '\\': // backslash - default: - return false - } - - return value[len(value)-1] == '\\' -} - -func getEscapedByte(b rune) (rune, error) { - switch b { - case '\'': // single quote - return '\'', nil - case '"': // quote - return '"', nil - case 'n': // newline - return '\n', nil - case 't': // table - return '\t', nil - case '\\': // backslash - return '\\', nil - default: - return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) - } -} - -func removeEscapedCharacters(b []rune) []rune { - for i := 0; i < len(b); i++ { - if isEscaped(b[:i], b[i]) { - c, err := getEscapedByte(b[i]) - if err != nil { - return b - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i-- - } - } - - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go deleted file mode 100644 index 94841c32..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go +++ /dev/null @@ -1,166 +0,0 @@ -package ini - -import ( - "fmt" - "sort" -) - -// Visitor is an interface used by walkers that will -// traverse an array of ASTs. -type Visitor interface { - VisitExpr(AST) error - VisitStatement(AST) error -} - -// DefaultVisitor is used to visit statements and expressions -// and ensure that they are both of the correct format. -// In addition, upon visiting this will build sections and populate -// the Sections field which can be used to retrieve profile -// configuration. -type DefaultVisitor struct { - scope string - Sections Sections -} - -// NewDefaultVisitor return a DefaultVisitor -func NewDefaultVisitor() *DefaultVisitor { - return &DefaultVisitor{ - Sections: Sections{ - container: map[string]Section{}, - }, - } -} - -// VisitExpr visits expressions... 
-func (v *DefaultVisitor) VisitExpr(expr AST) error { - t := v.Sections.container[v.scope] - if t.values == nil { - t.values = values{} - } - - switch expr.Kind { - case ASTKindExprStatement: - opExpr := expr.GetRoot() - switch opExpr.Kind { - case ASTKindEqualExpr: - children := opExpr.GetChildren() - if len(children) <= 1 { - return NewParseError("unexpected token type") - } - - rhs := children[1] - - if rhs.Root.Type() != TokenLit { - return NewParseError("unexpected token type") - } - - key := EqualExprKey(opExpr) - v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) - if err != nil { - return err - } - - t.values[key] = v - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - - v.Sections.container[v.scope] = t - return nil -} - -// VisitStatement visits statements... -func (v *DefaultVisitor) VisitStatement(stmt AST) error { - switch stmt.Kind { - case ASTKindCompletedSectionStatement: - child := stmt.GetRoot() - if child.Kind != ASTKindSectionStatement { - return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) - } - - name := string(child.Root.Raw()) - v.Sections.container[name] = Section{} - v.scope = name - default: - return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) - } - - return nil -} - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - Name string - values values -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) bool { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) int64 { - return t.values[k].IntValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) float64 { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go deleted file mode 100644 index 99915f7f..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go +++ /dev/null @@ -1,25 +0,0 @@ -package ini - -// Walk will traverse the AST using the v, the Visitor. 
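The Walk function below and the Visitor interface above form a plain visitor pattern. A self-contained distillation of that split, with simplified stand-in types rather than the ini package's own:

```go
package main

import "fmt"

type Node struct {
	Kind     string // "expr" or "stmt"
	Children []Node
}

// Visitor mirrors the two-method shape used by the ini walker.
type Visitor interface {
	VisitExpr(Node) error
	VisitStatement(Node) error
}

// Walk dispatches each top-level node to the visitor by kind.
func Walk(tree []Node, v Visitor) error {
	for _, n := range tree {
		var err error
		switch n.Kind {
		case "expr":
			err = v.VisitExpr(n)
		case "stmt":
			err = v.VisitStatement(n)
		}
		if err != nil {
			return err
		}
	}
	return nil
}

type printer struct{}

func (printer) VisitExpr(n Node) error {
	fmt.Println("expr with", len(n.Children), "children")
	return nil
}

func (printer) VisitStatement(n Node) error {
	fmt.Println("stmt")
	return nil
}

func main() {
	_ = Walk([]Node{{Kind: "stmt"}, {Kind: "expr", Children: []Node{{}}}}, printer{})
}
```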
-func Walk(tree []AST, v Visitor) error { - for _, node := range tree { - switch node.Kind { - case ASTKindExpr, - ASTKindExprStatement: - - if err := v.VisitExpr(node); err != nil { - return err - } - case ASTKindStatement, - ASTKindCompletedSectionStatement, - ASTKindNestedSectionStatement, - ASTKindCompletedNestedSectionStatement: - - if err := v.VisitStatement(node); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go deleted file mode 100644 index 7ffb4ae0..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -import ( - "unicode" -) - -// isWhitespace will return whether or not the character is -// a whitespace character. -// -// Whitespace is defined as a space or tab. -func isWhitespace(c rune) bool { - return unicode.IsSpace(c) && c != '\n' && c != '\r' -} - -func newWSToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if !isWhitespace(b[i]) { - break - } - } - - return newToken(TokenWS, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go deleted file mode 100644 index 2b42cbe6..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go +++ /dev/null @@ -1,9 +0,0 @@ -package sdk - -// Invalidator provides access to a type's invalidate method to make it -// invalidate it cache. -// -// e.g aws.SafeCredentialsProvider's Invalidate method. -type Invalidator interface { - Invalidate() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go deleted file mode 100644 index e5640abd..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go +++ /dev/null @@ -1,44 +0,0 @@ -package sdk - -import ( - "context" - "time" -) - -func init() { - NowTime = time.Now - Sleep = time.Sleep - SleepWithContext = DefaultSleepWithContext -} - -// NowTime is a value for getting the current time. This value can be overriden -// for testing mocking out current time. -var NowTime func() time.Time - -// Sleep is a value for sleeping for a duration. This value can be overriden -// for testing and mocking out sleep duration. -var Sleep func(time.Duration) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// This value can be overriden for testing and mocking out sleep duration. -var SleepWithContext func(context.Context, time.Duration) error - -// DefaultSleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. 
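The timer-or-context select in the implementation that follows is the idiomatic way to make a sleep cancelable. A small usage sketch of the same pattern under a deadline, standard library only:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sleepCtx waits for dur or until ctx is canceled, whichever comes first.
func sleepCtx(ctx context.Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()
	select {
	case <-t.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The one-second sleep is cut short by the 50ms deadline.
	if err := sleepCtx(ctx, time.Second); err != nil {
		fmt.Println("interrupted:", err)
	}
}
```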
-func DefaultSleepWithContext(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/encode.go deleted file mode 100644 index dc3d1296..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/encode.go +++ /dev/null @@ -1,95 +0,0 @@ -package protocol - -import ( - "io" -) - -// A FieldMarshaler interface is used to marshal struct fields when encoding. -type FieldMarshaler interface { - MarshalFields(FieldEncoder) error -} - -// FieldMarshalerFunc is a helper utility that wrapps a function and allows -// that function to be called as a FieldMarshaler. -type FieldMarshalerFunc func(FieldEncoder) error - -// MarshalFields will call the underlying function passing in the field encoder -// with the protocol field encoder. -func (fn FieldMarshalerFunc) MarshalFields(e FieldEncoder) error { - return fn(e) -} - -// ValueMarshaler provides a generic type for all encoding field values to be -// passed into a encoder's methods with. -type ValueMarshaler interface { - MarshalValue() (string, error) - MarshalValueBuf([]byte) ([]byte, error) -} - -// A StreamMarshaler interface is used to marshal a stream when encoding. -type StreamMarshaler interface { - MarshalStream() (io.ReadSeeker, error) -} - -// MarshalListValues is a marshaler for list encoders. -type MarshalListValues interface { - MarshalValues(enc ListEncoder) error -} - -// MapMarshaler is a marshaler for map encoders. -type MapMarshaler interface { - MarshalValues(enc MapEncoder) error -} - -// A ListEncoder provides the interface for encoders that will encode List elements. -type ListEncoder interface { - Start() - End() - - Map() MapEncoder - List() ListEncoder - - ListAddValue(v ValueMarshaler) - ListAddFields(m FieldMarshaler) -} - -// A MapEncoder provides the interface for encoders that will encode map elements. -type MapEncoder interface { - Start() - End() - - Map(k string) MapEncoder - List(k string) ListEncoder - - MapSetValue(k string, v ValueMarshaler) - MapSetFields(k string, m FieldMarshaler) -} - -// A FieldEncoder provides the interface for encoding struct field members. -type FieldEncoder interface { - SetValue(t Target, k string, m ValueMarshaler, meta Metadata) - SetStream(t Target, k string, m StreamMarshaler, meta Metadata) - SetFields(t Target, k string, m FieldMarshaler, meta Metadata) - - Map(t Target, k string, meta Metadata) MapEncoder - List(t Target, k string, meta Metadata) ListEncoder -} - -// A FieldBuffer provides buffering of fields so the number of -// allocations are reduced by providng a persistent buffer that is -// used between fields. -type FieldBuffer struct { - buf []byte -} - -// GetValue will retrieve the ValueMarshaler's value by appending the -// value to the buffer. Will return the buffer that was populated. -// -// This buffer is only valid until the next time GetValue is called. 
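GetValue's trick is to hand the same backing slice to successive append-style marshalers and keep whichever result comes back larger. A compact standalone sketch of that reuse, with illustrative values:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// One buffer is shared across fields: each marshal appends into the
	// reset slice, and the buffer keeps whichever result is larger,
	// mirroring FieldBuffer.GetValue.
	var buf []byte
	marshal := func(fill func([]byte) []byte) string {
		out := fill(buf[:0])
		if len(out) > len(buf) {
			buf = out
		}
		return string(out)
	}

	fmt.Println(marshal(func(b []byte) []byte { return strconv.AppendBool(b, true) }))
	fmt.Println(marshal(func(b []byte) []byte { return strconv.AppendInt(b, -42, 10) }))
	fmt.Println(marshal(func(b []byte) []byte { return strconv.AppendFloat(b, 3.5, 'f', -1, 64) }))
}
```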
-func (b *FieldBuffer) GetValue(m ValueMarshaler) ([]byte, error) { - v, err := m.MarshalValueBuf(b.buf) - if len(v) > len(b.buf) { - b.buf = v - } - return v, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/fields.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/fields.go deleted file mode 100644 index b131e224..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/fields.go +++ /dev/null @@ -1,197 +0,0 @@ -package protocol - -import ( - "bytes" - "encoding/base64" - "io" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// QuotedValue represents a type that should be quoted when encoding -// a string value. -type QuotedValue struct { - ValueMarshaler -} - -// BoolValue provies encoding of bool for AWS protocols. -type BoolValue bool - -// MarshalValue formats the value into a string for encoding. -func (v BoolValue) MarshalValue() (string, error) { - return strconv.FormatBool(bool(v)), nil -} - -// MarshalValueBuf formats the value into a byte slice for encoding. -// If there is enough room in the passed in slice v will be appended to it. -// -// Will reset the length of the passed in slice to 0. -func (v BoolValue) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return strconv.AppendBool(b, bool(v)), nil -} - -// BytesValue provies encoding of string for AWS protocols. -type BytesValue string - -// MarshalValue formats the value into a string for encoding. -func (v BytesValue) MarshalValue() (string, error) { - return base64.StdEncoding.EncodeToString([]byte(v)), nil -} - -// MarshalValueBuf formats the value into a byte slice for encoding. -// If there is enough room in the passed in slice v will be appended to it. -func (v BytesValue) MarshalValueBuf(b []byte) ([]byte, error) { - m := []byte(v) - - n := base64.StdEncoding.EncodedLen(len(m)) - if len(b) < n { - b = make([]byte, n) - } - base64.StdEncoding.Encode(b, m) - return b[:n], nil -} - -// StringValue provies encoding of string for AWS protocols. -type StringValue string - -// MarshalValue formats the value into a string for encoding. -func (v StringValue) MarshalValue() (string, error) { - return string(v), nil -} - -// MarshalValueBuf formats the value into a byte slice for encoding. -// If there is enough room in the passed in slice v will be appended to it. -// -// Will reset the length of the passed in slice to 0. -func (v StringValue) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, v...), nil -} - -// Int64Value provies encoding of int64 for AWS protocols. -type Int64Value int64 - -// MarshalValue formats the value into a string for encoding. -func (v Int64Value) MarshalValue() (string, error) { - return strconv.FormatInt(int64(v), 10), nil -} - -// MarshalValueBuf formats the value into a byte slice for encoding. -// If there is enough room in the passed in slice v will be appended to it. -// -// Will reset the length of the passed in slice to 0. -func (v Int64Value) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return strconv.AppendInt(b, int64(v), 10), nil -} - -// Float64Value provies encoding of float64 for AWS protocols. -type Float64Value float64 - -// MarshalValue formats the value into a string for encoding. -func (v Float64Value) MarshalValue() (string, error) { - return strconv.FormatFloat(float64(v), 'f', -1, 64), nil -} - -// MarshalValueBuf formats the value into a byte slice for encoding. -// If there is enough room in the passed in slice v will be appended to it. 
-// -// Will reset the length of the passed in slice to 0. -func (v Float64Value) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return strconv.AppendFloat(b, float64(v), 'f', -1, 64), nil -} - -// JSONValue provies encoding of aws.JSONValues for AWS protocols. -type JSONValue struct { - V aws.JSONValue - EscapeMode EscapeMode -} - -// MarshalValue formats the value into a string for encoding. -func (v JSONValue) MarshalValue() (string, error) { - return EncodeJSONValue(v.V, v.EscapeMode) -} - -// MarshalValueBuf formats the value into a byte slice for encoding. -// If there is enough room in the passed in slice v will be appended to it. -// -// Will reset the length of the passed in slice to 0. -func (v JSONValue) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - - m, err := EncodeJSONValue(v.V, v.EscapeMode) - if err != nil { - return nil, err - } - - return append(b, []byte(m)...), nil -} - -// Time formats for protocol time fields. -const ( - ISO8601TimeFormat = "2006-01-02T15:04:05Z" // ISO 8601 formated time. - RFC822TimeFromat = "Mon, 2 Jan 2006 15:04:05 GMT" // RFC822 formatted time. - UnixTimeFormat = "unix time format" // Special case for Unix time -) - -// TimeValue provies encoding of time.Time for AWS protocols. -type TimeValue struct { - V time.Time - Format string -} - -// MarshalValue formats the value into a string givin a format for encoding. -func (v TimeValue) MarshalValue() (string, error) { - t := time.Time(v.V) - - if v.Format == UnixTimeFormat { - return strconv.FormatInt(t.UTC().Unix(), 10), nil - } - return t.UTC().Format(v.Format), nil -} - -// MarshalValueBuf formats the value into a byte slice for encoding. -// If there is enough room in the passed in slice v will be appended to it. -// -// Will reset the length of the passed in slice to 0. -func (v TimeValue) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - - m, err := v.MarshalValue() - if err != nil { - return nil, err - } - - return append(b, m...), nil -} - -// A ReadSeekerStream wrapps an io.ReadSeeker to be used as a StreamMarshaler. -type ReadSeekerStream struct { - V io.ReadSeeker -} - -// MarshalStream returns the wrapped io.ReadSeeker for encoding. -func (v ReadSeekerStream) MarshalStream() (io.ReadSeeker, error) { - return v.V, nil -} - -// A BytesStream aliases a byte slice to be used as a StreamMarshaler. -type BytesStream []byte - -// MarshalStream marshals a byte slice into an io.ReadSeeker for encoding. -func (v BytesStream) MarshalStream() (io.ReadSeeker, error) { - return bytes.NewReader([]byte(v)), nil -} - -// A StringStream aliases a string to be used as a StreamMarshaler. -type StringStream string - -// MarshalStream marshals a string into an io.ReadSeeker for encoding. -func (v StringStream) MarshalStream() (io.ReadSeeker, error) { - return strings.NewReader(string(v)), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/header_encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/header_encoder.go deleted file mode 100644 index 38bae691..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/header_encoder.go +++ /dev/null @@ -1,116 +0,0 @@ -package protocol - -import ( - "fmt" - "net/http" -) - -// HeaderMapEncoder builds a map valu -type HeaderMapEncoder struct { - Prefix string - Header http.Header - Err error -} - -// MapSetValue adds a single value to the header. 
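MapSetValue below joins the encoder's prefix onto each key before writing it to the http.Header. A minimal sketch of that behavior; the X-Amz-Meta- prefix here is only an example:

```go
package main

import (
	"fmt"
	"net/http"
)

// setPrefixed joins a prefix with each key before writing it, the way
// HeaderMapEncoder does; the prefix and keys are illustrative.
func setPrefixed(h http.Header, prefix string, kv map[string]string) {
	for k, v := range kv {
		h.Set(prefix+k, v)
	}
}

func main() {
	h := http.Header{}
	setPrefixed(h, "X-Amz-Meta-", map[string]string{"owner": "team-a"})
	fmt.Println(h.Get("X-Amz-Meta-Owner")) // team-a
}
```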
-func (e *HeaderMapEncoder) MapSetValue(k string, v ValueMarshaler) { - if e.Err != nil { - return - } - - str, err := v.MarshalValue() - if err != nil { - e.Err = err - return - } - - if len(e.Prefix) > 0 { - k = e.Prefix + k - } - - e.Header.Set(k, str) -} - -// List executes the passed in callback with a list encoder based on -// the context of this HeaderMapEncoder. -func (e *HeaderMapEncoder) List(k string) ListEncoder { - if e.Err != nil { - return nil - } - - if len(e.Prefix) > 0 { - k = e.Prefix + k - } - - return &HeaderListEncoder{Key: k, Header: e.Header} -} - -// Map sets the header element with nested maps appending the -// passed in k to the prefix if one was set. -func (e *HeaderMapEncoder) Map(k string) MapEncoder { - if e.Err != nil { - return nil - } - - if len(e.Prefix) > 0 { - k = e.Prefix + k - } - - return &HeaderMapEncoder{Prefix: k, Header: e.Header} -} - -// Start does nothing for header encodings. -func (e *HeaderMapEncoder) Start() {} - -// End does nothing for header encodings. -func (e *HeaderMapEncoder) End() {} - -// MapSetFields Is not implemented, query map of FieldMarshaler is undefined. -func (e *HeaderMapEncoder) MapSetFields(k string, m FieldMarshaler) { - e.Err = fmt.Errorf("header map encoder MapSetFields not supported, %s", k) -} - -// HeaderListEncoder will encode list values nested into a header key. -type HeaderListEncoder struct { - Key string - Header http.Header - Err error -} - -// ListAddValue encodes an individual list value into the header. -func (e *HeaderListEncoder) ListAddValue(v ValueMarshaler) { - if e.Err != nil { - return - } - - str, err := v.MarshalValue() - if err != nil { - e.Err = err - return - } - - e.Header.Add(e.Key, str) -} - -// List Is not implemented, header list of list is undefined. -func (e *HeaderListEncoder) List() ListEncoder { - e.Err = fmt.Errorf("header list encoder ListAddList not supported, %s", e.Key) - return nil -} - -// Map Is not implemented, header list of map is undefined. -func (e *HeaderListEncoder) Map() MapEncoder { - e.Err = fmt.Errorf("header list encoder ListAddMap not supported, %s", e.Key) - return nil -} - -// Start does nothing for header list encodings. -func (e *HeaderListEncoder) Start() {} - -// End does nothing for header list encodings. -func (e *HeaderListEncoder) End() {} - -// ListAddFields Is not implemented, query list of FieldMarshaler is undefined. -func (e *HeaderListEncoder) ListAddFields(m FieldMarshaler) { - e.Err = fmt.Errorf("header list encoder ListAddFields not supported, %s", e.Key) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/host.go deleted file mode 100644 index 14ed78d5..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/host.go +++ /dev/null @@ -1,68 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. -var ValidateEndpointHostHandler = aws.NamedHandler{ - Name: "awssdk.protocol.ValidateEndpointHostHandler", - Fn: func(r *aws.Request) { - err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) - if err != nil { - r.Error = err - } - }, -} - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. 
-func ValidateEndpointHost(opName, host string) error { - paramErrs := aws.ErrInvalidParams{Context: opName} - labels := strings.Split(host, ".") - - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - paramErrs.Add(aws.NewErrParamFormat( - "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) - } - } - - if len(host) > 255 { - paramErrs.Add(aws.NewErrParamMaxLen( - "endpoint host", 255, host, - )) - } - - if paramErrs.Len() > 0 { - return paramErrs - } - return nil -} - -// ValidHostLabel returns if the label is a valid RFC 3986 host label. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/host_prefix.go deleted file mode 100644 index 2018867c..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/host_prefix.go +++ /dev/null @@ -1,49 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// HostPrefixHandlerName is the handler name for the host prefix request -// handler. -const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" - -// NewHostPrefixHandler constructs a build handler -func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) aws.NamedHandler { - builder := HostPrefixBuilder{ - Prefix: prefix, - LabelsFn: labelsFn, - } - return aws.NamedHandler{ - Name: HostPrefixHandlerName, - Fn: builder.Build, - } -} - -// HostPrefixBuilder provides the request handler to expand and prepend -// the host prefix into the operation's request endpoint host. -type HostPrefixBuilder struct { - Prefix string - LabelsFn func() map[string]string -} - -// Build updates the passed in Request with the HostPrefix template expanded. -func (h HostPrefixBuilder) Build(r *aws.Request) { - if r.Config.DisableEndpointHostPrefix { - return - } - var labels map[string]string - if h.LabelsFn != nil { - labels = h.LabelsFn() - } - prefix := h.Prefix - for name, value := range labels { - prefix = strings.Replace(prefix, "{"+name+"}", value, -1) - } - r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host - if len(r.HTTPRequest.Host) > 0 { - r.HTTPRequest.Host = prefix + r.HTTPRequest.Host - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/idempotency.go deleted file mode 100644 index 53831dff..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. 
-func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. -func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/encode.go deleted file mode 100644 index 641b05f5..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/encode.go +++ /dev/null @@ -1,290 +0,0 @@ -package json - -import ( - "bytes" - "fmt" - "io" - - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -// An Encoder provides encoding of the AWS JSON protocol. This encoder will will -// write all content to JSON. Only supports body and payload targets. -type Encoder struct { - encoder - root bool -} - -// NewEncoder creates a new encoder for encoding AWS JSON protocol. Only encodes -// fields into the JSON body, and error is returned if target is anything other -// than Body or Payload. -func NewEncoder() *Encoder { - buf := bytes.NewBuffer([]byte{'{'}) - e := &Encoder{ - encoder: encoder{ - buf: buf, - parent: nil, - fieldBuf: &protocol.FieldBuffer{}, - }, - root: true, - } - - return e -} - -// Encode returns the encoded XMl reader. An error will be returned if one was -// encountered while building the JSON body. -func (e *Encoder) Encode() (io.ReadSeeker, error) { - b, err := e.encode() - if err != nil { - return nil, err - } - - if len(b) == 2 { - // Account for first starting object in buffer - return nil, nil - } - - return bytes.NewReader(b), nil -} - -// SetValue sets an individual value to the JSON body. -func (e *Encoder) SetValue(t protocol.Target, k string, v protocol.ValueMarshaler, meta protocol.Metadata) { - e.encoder.writeSep() - e.writeKey(k) - e.writeValue(v) -} - -// SetStream is not supported for JSON protocol marshaling. 
-func (e *Encoder) SetStream(t protocol.Target, k string, v protocol.StreamMarshaler, meta protocol.Metadata) { - if e.err != nil { - return - } - e.err = fmt.Errorf("json encoder SetStream not supported, %s, %s", t, k) -} - -// Map will return a new mapEncoder and create a new scope for the map encoding. -func (e *Encoder) Map(t protocol.Target, k string, meta protocol.Metadata) protocol.MapEncoder { - temp := newScope(e.encoder, &e.encoder) - return &mapEncoder{temp, k} -} - -// List will return a new listEncoder and create a new scope for the list encoding. -func (e *Encoder) List(t protocol.Target, k string, meta protocol.Metadata) protocol.ListEncoder { - temp := newScope(e.encoder, &e.encoder) - return &listEncoder{temp, k} -} - -// SetFields sets the nested fields to the JSON body. -func (e *Encoder) SetFields(t protocol.Target, k string, m protocol.FieldMarshaler, meta protocol.Metadata) { - if t == protocol.PayloadTarget { - // Ignore payload key and only marshal body without wrapping in object first. - nested := Encoder{ - encoder: encoder{ - buf: e.encoder.buf, - fieldBuf: e.encoder.fieldBuf, - }, - } - m.MarshalFields(&nested) - e.err = nested.err - return - } - - e.writeSep() - e.writeKey(k) - e.writeObject(func(enc encoder) error { - temp := newScope(enc, &e.encoder) - nested := Encoder{encoder: temp} - m.MarshalFields(&nested) - return nested.err - }) -} - -// A listEncoder encodes elements within a list for the JSON encoder. -type listEncoder struct { - encoder - k string -} - -// Map return a new mapEncoder while creating a new scope for the encoder. -func (e *listEncoder) Map() protocol.MapEncoder { - temp := newScope(e.encoder, &e.encoder) - return &mapEncoder{temp, ""} -} - -// List return a new listEncoder while creating a new scope for the encoder. -func (e *listEncoder) List() protocol.ListEncoder { - temp := newScope(e.encoder, &e.encoder) - return &listEncoder{temp, ""} -} - -// Start will open a new scope for a list and write the given key. -func (e *listEncoder) Start() { - e.encoder.parent.writeSep() - e.writeKey(e.k) - e.WriteListStart() -} - -// End will close the list. -func (e *listEncoder) End() { - e.WriteListEnd() -} - -// ListAddValue will add the value to the list. -func (e *listEncoder) ListAddValue(v protocol.ValueMarshaler) { - e.encoder.writeSep() - e.writeValue(v) -} - -// ListAddFields will set the nested type's fields to the list. -func (e *listEncoder) ListAddFields(m protocol.FieldMarshaler) { - e.encoder.writeSep() - e.writeObject(func(enc encoder) error { - temp := newScope(enc, &e.encoder) - nested := Encoder{encoder: temp} - m.MarshalFields(&nested) - return nested.err - }) -} - -// A mapEncoder encodes key values pair map values for the JSON encoder. -type mapEncoder struct { - encoder encoder - k string -} - -// Start will open a new scope for a list and write the given key. -func (e *mapEncoder) Start() { - e.encoder.parent.writeSep() - e.encoder.writeKey(e.k) - e.encoder.WriteMapStart() -} - -// End will close the list. -func (e *mapEncoder) End() { - e.encoder.WriteMapEnd() -} - -// Map will create a new scope and return a mapEncoder. -func (e *mapEncoder) Map(k string) protocol.MapEncoder { - temp := newScope(e.encoder, &e.encoder) - return &mapEncoder{temp, k} -} - -// List will create a new scope and return a listEncoder -func (e *mapEncoder) List(k string) protocol.ListEncoder { - temp := newScope(e.encoder, &e.encoder) - return &listEncoder{temp, k} -} - -// MapSetValue sets a map value. 
-func (e *mapEncoder) MapSetValue(k string, v protocol.ValueMarshaler) { - e.encoder.writeSep() - e.encoder.writeKey(k) - e.encoder.writeValue(v) -} - -// MapSetFields will set the nested type's fields under the map. -func (e *mapEncoder) MapSetFields(k string, m protocol.FieldMarshaler) { - e.encoder.writeSep() - e.encoder.writeKey(k) - e.encoder.writeObject(func(enc encoder) error { - temp := newScope(enc, &e.encoder) - nested := Encoder{encoder: temp} - m.MarshalFields(&nested) - return nested.err - }) -} - -type encoder struct { - buf *bytes.Buffer - fieldBuf *protocol.FieldBuffer - started bool - parent *encoder - err error -} - -func (e encoder) encode() ([]byte, error) { - if e.err != nil { - return nil, e.err - } - - // Close the root object - e.buf.WriteByte('}') - - return e.buf.Bytes(), nil -} - -func (e *encoder) writeKey(k string) { - e.buf.WriteByte('"') - e.buf.WriteString(k) - e.buf.WriteByte('"') - e.buf.WriteByte(':') -} - -func (e *encoder) writeValue(v protocol.ValueMarshaler) { - if e.err != nil { - return - } - - b, err := e.fieldBuf.GetValue(v) - if err != nil { - e.err = err - return - } - - var asStr bool - switch v.(type) { - case protocol.QuotedValue: - asStr = true - } - - if asStr { - escapeStringBytes(e.buf, b) - } else { - e.buf.Write(b) - } -} - -func (e *encoder) writeObject(fn func(encoder) error) { - if e.err != nil { - return - } - - e.buf.WriteByte('{') - e.err = fn(*e) - e.buf.WriteByte('}') -} - -func (e *encoder) WriteListStart() { - e.buf.WriteByte('[') -} - -func (e *encoder) WriteListEnd() { - e.buf.WriteByte(']') -} - -func (e *encoder) WriteMapStart() { - e.buf.WriteByte('{') -} - -func (e *encoder) WriteMapEnd() { - e.buf.WriteByte('}') -} - -func (e *encoder) writeSep() { - if e.started { - e.buf.WriteByte(',') - } else { - e.started = true - } - -} - -// newScope will return a new encoder with the correct parent and started to false. -func newScope(e encoder, parent *encoder) encoder { - temp := e - temp.started = false - temp.parent = parent - return temp -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/escape.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/escape.go deleted file mode 100644 index d984d0cd..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/escape.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet - -package json - -import ( - "bytes" - "unicode/utf8" -) - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). 
-var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// copied from Go 1.8 stdlib's encoding/json/#hex -var hex = "0123456789abcdef" - -// escapeStringBytes escapes and writes the passed in string bytes to the dst -// buffer -// -// Copied and modifed from Go 1.8 stdlib's encodeing/json/#encodeState.stringBytes -func escapeStringBytes(e *bytes.Buffer, s []byte) { - e.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if safeSet[b] { - i++ - continue - } - if start < i { - e.Write(s[start:i]) - } - switch b { - case '\\', '"': - e.WriteByte('\\') - e.WriteByte(b) - case '\n': - e.WriteByte('\\') - e.WriteByte('n') - case '\r': - e.WriteByte('\\') - e.WriteByte('r') - case '\t': - e.WriteByte('\\') - e.WriteByte('t') - default: - // This encodes bytes < 0x20 except for \t, \n and \r. - // If escapeHTML is set, it also escapes <, >, and & - // because they can lead to security holes when - // user-controlled strings are rendered into JSON - // and served to some browsers. - e.WriteString(`\u00`) - e.WriteByte(hex[b>>4]) - e.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. 
- if c == '\u2028' || c == '\u2029' { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\u202`) - e.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - e.Write(s[start:]) - } - e.WriteByte('"') -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 029be321..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,293 +0,0 @@ -// Package jsonutil provides JSON serialization of AWS requests and responses. -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "", false) - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag, parentCollection bool) error { - origVal := value - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(origVal, buf, tag, parentCollection) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - buf.WriteByte('{') - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - - // This allocates the most memory. - // Additionally, we cannot skip nil fields due to - // idempotency auto filling. 
- field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { - continue // ignore unset fields - } else if member.Kind() == reflect.String && member.Len() == 0 { - continue - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag, false) - if err != nil { - return err - } - - } - - buf.WriteString("}") - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - elem := value.Index(i) - buildAny(elem, buf, "", true) - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - sv := sortedValues(value.MapKeys()) - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "", true) - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag, parentCollection bool) error { - // prevents allocation on the heap. - scratch := [64]byte{} - switch value := reflect.Indirect(v); value.Kind() { - case reflect.String: - str := value.String() - isEnum := len(tag.Get("enum")) != 0 - if parentCollection || (len(str) > 0 && isEnum) || !isEnum { - writeString(str, buf) - } - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int64: - buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) - case reflect.Float64: - f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} - } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) - default: - switch converted := value.Interface().(type) { - case time.Time: - buf.Write(strconv.AppendInt(scratch[:0], converted.UTC().Unix(), 10)) - case []byte: - if !value.IsNil() { - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. 
- enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - case aws.JSONValue: - str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) - if err != nil { - return fmt.Errorf("unable to encode JSONValue, %v", err) - } - buf.WriteString(str) - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -var hex = "0123456789abcdef" - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for i := 0; i < len(s); i++ { - if s[i] == '"' { - buf.WriteString(`\"`) - } else if s[i] == '\\' { - buf.WriteString(`\\`) - } else if s[i] == '\b' { - buf.WriteString(`\b`) - } else if s[i] == '\f' { - buf.WriteString(`\f`) - } else if s[i] == '\r' { - buf.WriteString(`\r`) - } else if s[i] == '\t' { - buf.WriteString(`\t`) - } else if s[i] == '\n' { - buf.WriteString(`\n`) - } else if s[i] < 32 { - buf.WriteString("\\u00") - buf.WriteByte(hex[s[i]>>4]) - buf.WriteByte(hex[s[i]&0xF]) - } else { - buf.WriteByte(s[i]) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index dce3a67b..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,242 +0,0 @@ -package jsonutil - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "reflect" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -// UnmarshalJSON reads a stream and unmarshals the results in object v. 
-func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - b, err := ioutil.ReadAll(stream) - if err != nil { - return err - } - - if len(b) == 0 { - return nil - } - - if err := json.Unmarshal(b, &out); err != nil { - return err - } - - return unmarshalAny(reflect.ValueOf(v), out, "") -} - -func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - _, tok := value.Interface().(*time.Time) - if _, ok := value.Interface().(time.Time); !(ok || tok) { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return unmarshalStruct(value, data, tag) - case "list": - return unmarshalList(value, data, tag) - case "map": - return unmarshalMap(value, data, tag) - default: - return unmarshalScalar(value, data, tag) - } -} - -func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - member := value.FieldByIndex(field.Index) - err := unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { 
- errf := func() error { - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - if value.Kind() == reflect.String { - value.SetString(d) - return nil - } - - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case string: - value.Set(reflect.ValueOf(d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - case aws.JSONValue: - // No need to use escaping as the value is a non-quoted string. - v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) - if err != nil { - return err - } - value.Set(reflect.ValueOf(v)) - default: - return errf() - } - case float64: - switch value.Interface().(type) { - case *int64: - di := int64(d) - value.Set(reflect.ValueOf(&di)) - case int64: - di := int64(d) - value.Set(reflect.ValueOf(di)) - case *float64: - value.Set(reflect.ValueOf(&d)) - case float64: - value.Set(reflect.ValueOf(d)) - case *time.Time: - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(&t)) - case time.Time: - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(t)) - default: - return errf() - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return errf() - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc/jsonrpc.go deleted file mode 100644 index d2d4f829..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc/jsonrpc.go +++ /dev/null @@ -1,109 +0,0 @@ -// Package jsonrpc provides JSON RPC utilities for serialization of AWS -// requests and responses. -package jsonrpc - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go - -import ( - "encoding/json" - "io/ioutil" - "strings" - - request "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil" - "github.com/aws/aws-sdk-go-v2/private/protocol/rest" -) - -var emptyJSON = []byte("{}") - -// BuildHandler is a named request handler for building jsonrpc protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} - -// Build builds a JSON payload for a JSON RPC request. 
-func Build(req *request.Request) { - var buf []byte - var err error - if req.ParamsFilled() { - buf, err = jsonutil.BuildJSON(req.Params) - if err != nil { - req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) - return - } - } else { - buf = emptyJSON - } - - if req.Metadata.TargetPrefix != "" || string(buf) != "{}" { - req.SetBufferBody(buf) - } - - if req.Metadata.TargetPrefix != "" { - target := req.Metadata.TargetPrefix + "." + req.Operation.Name - req.HTTPRequest.Header.Add("X-Amz-Target", target) - } - if req.Metadata.JSONVersion != "" { - jsonVersion := req.Metadata.JSONVersion - req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) - } -} - -// Unmarshal unmarshals a response for a JSON RPC service. -func Unmarshal(req *request.Request) { - defer req.HTTPResponse.Body.Close() - err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) - } - return -} - -// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. -func UnmarshalMeta(req *request.Request) { - rest.UnmarshalMeta(req) -} - -// UnmarshalError unmarshals an error response for a JSON RPC service. -func UnmarshalError(req *request.Request) { - defer req.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) - return - } - if len(bodyBytes) == 0 { - req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", req.HTTPResponse.Status, nil), - req.HTTPResponse.StatusCode, - "", - ) - return - } - var jsonErr jsonErrorResponse - if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { - req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) - return - } - - codes := strings.SplitN(jsonErr.Code, "#", 2) - req.Error = awserr.NewRequestFailure( - awserr.New(codes[len(codes)-1], jsonErr.Message, nil), - req.HTTPResponse.StatusCode, - req.RequestID, - ) -} - -type jsonErrorResponse struct { - Code string `json:"__type"` - Message string `json:"message"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonvalue.go deleted file mode 100644 index 8a270434..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonvalue.go +++ /dev/null @@ -1,76 +0,0 @@ -package protocol - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strconv" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// EscapeMode is the mode that should be use for escaping a value -type EscapeMode uint - -// The modes for escaping a value before it is marshaled, and unmarshaled. -const ( - NoEscape EscapeMode = iota - Base64Escape - QuotedEscape -) - -// EncodeJSONValue marshals the value into a JSON string, and optionally base64 -// encodes the string before returning it. -// -// Will panic if the escape mode is unknown. 
-func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { - b, err := json.Marshal(v) - if err != nil { - return "", err - } - - switch escape { - case NoEscape: - return string(b), nil - case Base64Escape: - return base64.StdEncoding.EncodeToString(b), nil - case QuotedEscape: - return strconv.Quote(string(b)), nil - } - - panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) -} - -// DecodeJSONValue will attempt to decode the string input as a JSONValue. -// Optionally decoding base64 the value first before JSON unmarshaling. -// -// Will panic if the escape mode is unknown. -func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { - var b []byte - var err error - - switch escape { - case NoEscape: - b = []byte(v) - case Base64Escape: - b, err = base64.StdEncoding.DecodeString(v) - case QuotedEscape: - var u string - u, err = strconv.Unquote(v) - b = []byte(u) - default: - panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) - } - - if err != nil { - return nil, err - } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) - if err != nil { - return nil, err - } - - return m, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/metadata.go deleted file mode 100644 index 3a94fd02..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/metadata.go +++ /dev/null @@ -1,24 +0,0 @@ -package protocol - -// An Attribute is a FieldValue that resides within the imediant context of -// another field. Such as XML attribute for tags. -type Attribute struct { - Name string - Value ValueMarshaler - Meta Metadata -} - -// Metadata is a collection of configuration flags for encoders to render the -// output. -type Metadata struct { - Attributes []Attribute - - Flatten bool - - ListLocationName string - MapLocationNameKey string - MapLocationNameValue string - - XMLNamespacePrefix string - XMLNamespaceURI string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/path_replace.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/path_replace.go deleted file mode 100644 index ae64a17d..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/path_replace.go +++ /dev/null @@ -1,136 +0,0 @@ -package protocol - -import ( - "bytes" - "fmt" -) - -const ( - uriTokenStart = '{' - uriTokenStop = '}' - uriTokenSkip = '+' -) - -func bufCap(b []byte, n int) []byte { - if cap(b) < n { - return make([]byte, 0, n) - } - - return b[0:0] -} - -// PathReplace replaces path elements using field buffers -type PathReplace struct { - // May mutate path slice - path []byte - rawPath []byte - fieldBuf []byte -} - -// NewPathReplace creats a built PathReplace value that can be used to replace -// path elements. -func NewPathReplace(path string) PathReplace { - return PathReplace{ - path: []byte(path), - rawPath: []byte(path), - } -} - -// Encode returns an unescaped path, and escaped path. -func (r *PathReplace) Encode() (path string, rawPath string) { - return string(r.path), string(r.rawPath) -} - -// ReplaceElement replaces a single element in the path string. 
-func (r *PathReplace) ReplaceElement(key, val string) (err error) { - r.path, r.fieldBuf, err = replacePathElement(r.path, r.fieldBuf, key, val, false) - r.rawPath, r.fieldBuf, err = replacePathElement(r.rawPath, r.fieldBuf, key, val, true) - return err -} - -func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { - fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } - fieldBuf = append(fieldBuf, uriTokenStart) - fieldBuf = append(fieldBuf, key...) - - start := bytes.Index(path, fieldBuf) - end := start + len(fieldBuf) - if start < 0 || len(path[end:]) == 0 { - // TODO what to do about error? - return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) - } - - encodeSep := true - if path[end] == uriTokenSkip { - // '+' token means do not escape slashes - encodeSep = false - end++ - } - - if escape { - val = escapePath(val, encodeSep) - } - - if path[end] != uriTokenStop { - return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) - } - end++ - - fieldBuf = bufCap(fieldBuf, len(val)) - fieldBuf = append(fieldBuf, val...) - - keyLen := end - start - valLen := len(fieldBuf) - - if keyLen == valLen { - copy(path[start:], fieldBuf) - return path, fieldBuf, nil - } - - newLen := len(path) + (valLen - keyLen) - if len(path) < newLen { - path = path[:cap(path)] - } - if cap(path) < newLen { - newURI := make([]byte, newLen) - copy(newURI, path) - path = newURI - } - - // shift - copy(path[start+valLen:], path[end:]) - path = path[:newLen] - copy(path[start:], fieldBuf) - - return path, fieldBuf, nil -} - -// copied from rest.EscapePath -// escapes part of a URL path in Amazon style -func escapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -var noEscape [256]bool - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' || - i == '_' || - i == '~' - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/build.go deleted file mode 100644 index 3905c4bc..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialization of AWS query requests, and responses. -package query - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - request "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. 
-func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.Metadata.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) - return - } - - if r.ExpireTime == 0 { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 01de5ca9..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,261 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. -func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "", false) -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag, parentCollection bool) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - var err error - switch t { - case "structure": - err = q.parseStruct(v, value, prefix) - case "list": - err = q.parseList(v, value, prefix, tag) - case "map": - err = q.parseMap(v, value, prefix, tag) - default: - err = q.parseScalar(v, value, prefix, tag, parentCollection) - } - - if protocol.IsNotSetError(err) { - return nil - } - return err -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." 
+ name - } - - if err := q.parseValue(v, elemValue, name, field.Tag, false); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - if _, ok := value.Interface().([]byte); ok { - return q.parseScalar(v, value, prefix, tag, true) - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - if listName := tag.Get("locationNameList"); listName == "" { - prefix += ".member" - } else { - prefix += "." + listName - } - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, "", true); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. - mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, "", true); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname - } - - if err := q.parseValue(v, mapValue, valueName, "", true); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag, parentCollection bool) error { - if r.Kind() == reflect.String { - val, err := protocol.GetValue(r) - isEnum := len(tag.Get("enum")) != 0 - - if err == nil || parentCollection { - v.Set(name, val) - } else if isEnum && len(val) > 0 { - v.Set(name, val) - } else if !isEnum { - v.Set(name, val) - } - return err - } - - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - v.Set(name, value.UTC().Format(ISO8601UTC)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal.go deleted file mode 100644 index 36c0e12e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,33 +0,0 @@ -package query - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - request "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) - return - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. 
-func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal_error.go deleted file mode 100644 index 2af00c32..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,66 +0,0 @@ -package query - -import ( - "encoding/xml" - "io/ioutil" - - request "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` -} - -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - -// UnmarshalError unmarshals an error response for an AWS Query service. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) - return - } - - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) - return - } - - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Failed to retrieve any error message from the response body - r.Error = awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query_encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query_encoder.go deleted file mode 100644 index baebc994..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query_encoder.go +++ /dev/null @@ -1,105 +0,0 @@ -package protocol - -import ( - "fmt" - "net/url" -) - -// QueryMapEncoder builds a query string. -type QueryMapEncoder struct { - Prefix string - Query url.Values - Err error -} - -// List will return a new QueryListEncoder. -func (e *QueryMapEncoder) List(k string) ListEncoder { - if len(e.Prefix) > 0 { - k = e.Prefix + k - } - - return &QueryListEncoder{k, e.Query, nil} -} - -// Map will return a new QueryMapEncoder. -func (e *QueryMapEncoder) Map(k string) MapEncoder { - if len(e.Prefix) > 0 { - k = e.Prefix + k - } - - return &QueryMapEncoder{k, e.Query, nil} -} - -// Start does nothing. -func (e *QueryMapEncoder) Start() {} - -// End does nothing. -func (e *QueryMapEncoder) End() {} - -// MapSetValue adds a single value to the query. 
-func (e *QueryMapEncoder) MapSetValue(k string, v ValueMarshaler) { - if e.Err != nil { - return - } - - str, err := v.MarshalValue() - if err != nil { - e.Err = err - return - } - - if len(e.Prefix) > 0 { - k = e.Prefix + k - } - - e.Query.Add(k, str) -} - -// MapSetFields Is not implemented, query map of map is undefined. -func (e *QueryMapEncoder) MapSetFields(k string, m FieldMarshaler) { - e.Err = fmt.Errorf("query map encoder MapSetFields not supported, %s", e.Prefix) -} - -// QueryListEncoder will encode list values nested into a query key. -type QueryListEncoder struct { - Key string - Query url.Values - Err error -} - -// List will return a new QueryListEncoder. -func (e *QueryListEncoder) List() ListEncoder { - return &QueryListEncoder{e.Key, e.Query, nil} -} - -// Start does nothing for the query protocol. -func (e *QueryListEncoder) Start() {} - -// End does nothing for the query protocol. -func (e *QueryListEncoder) End() {} - -// Map will return a new QueryMapEncoder. -func (e *QueryListEncoder) Map() MapEncoder { - k := e.Key - return &QueryMapEncoder{k, e.Query, nil} -} - -// ListAddValue encodes an individual list value into the querystring. -func (e *QueryListEncoder) ListAddValue(v ValueMarshaler) { - if e.Err != nil { - return - } - - str, err := v.MarshalValue() - if err != nil { - e.Err = err - return - } - - e.Query.Add(e.Key, str) -} - -// ListAddFields Is not implemented, query list of FieldMarshaler is undefined. -func (e *QueryListEncoder) ListAddFields(m FieldMarshaler) { - e.Err = fmt.Errorf("query list encoder ListAddFields not supported, %s", e.Key) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/build.go deleted file mode 100644 index 14c9f474..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/build.go +++ /dev/null @@ -1,339 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - request "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' || - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, false) - buildBody(r, v) - } -} - -// BuildAsGET builds the REST component of a service request with the ability to hoist -// data from the body. 
-func BuildAsGET(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, true) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { - query := r.HTTPRequest.URL.Query() - - // Setup the raw path to match the base path pattern. This is needed - // so that when the path is mutated a custom escaped version can be - // stored in RawPath that will be used by the Go client. - r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch m.Kind() { - case reflect.Ptr, reflect.Interface: - if !m.Elem().IsValid() { - continue - } - } - - if field.Tag.Get("ignore") != "" { - continue - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) - case "querystring": - err = buildQueryString(query, m, name, field.Tag) - default: - if buildGETQuery { - err = buildQueryString(query, m, name, field.Tag) - } - } - - if protocol.IsNotSetError(err) { - err = nil - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - if !r.Config.DisableRestProtocolURICleaning { - cleanPath(r.HTTPRequest.URL) - } -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New("SerializationError", - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { - var err error - str := "" - - if v.Kind() == reflect.String { - str, err = protocol.GetValue(v) - } else { - str, err = convertType(v, tag) - } - - if protocol.IsNotSetError(err) { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { - prefix := tag.Get("locationName") - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key), tag) - if protocol.IsNotSetError(err) { - continue - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - - } - - header.Add(prefix+key.String(), str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { - value := "" - var err error - if v.Kind() == reflect.String { - value, err = protocol.GetValue(v) - } else { - value, err = 
convertType(v, tag) - } - - if protocol.IsNotSetError(err) { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) - u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) - - u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) - u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { - if kind := v.Kind(); kind == reflect.String { - value, err := protocol.GetValue(v) - if err == nil { - query.Add(name, value) - } - return err - } else if kind == reflect.Ptr { - v = v.Elem() - } - - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.String { - for i := 0; i < v.Len(); i++ { - query.Add(name, v.Index(i).String()) - } - return nil - } - - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case []string: - for _, item := range value { - query.Add(name, item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string]string: - for key, item := range value { - query.Add(key, item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - case map[string][]string: - for key, items := range value { - for _, item := range items { - query.Add(key, item) - } - } - default: - str, err := convertType(v, tag) - if protocol.IsNotSetError(err) { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - query.Set(name, str) - } - - return nil -} - -func cleanPath(u *url.URL) { - hasSlash := strings.HasSuffix(u.Path, "/") - - // clean up path, removing duplicate `/` - u.Path = path.Clean(u.Path) - u.RawPath = path.Clean(u.RawPath) - - if hasSlash && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - u.RawPath += "/" - } -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", &protocol.ErrValueNotSet{} - } - - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - str = value.UTC().Format(RFC822) - case aws.JSONValue: - if len(value) == 0 { - return "", &protocol.ErrValueNotSet{} - } - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - str, err = protocol.EncodeJSONValue(value, escaping) - if err != nil { - return "", fmt.Errorf("unable to encode JSONValue, %v", err) - } - - default: - err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/encode.go 
b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/encode.go deleted file mode 100644 index ff1ce949..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/encode.go +++ /dev/null @@ -1,144 +0,0 @@ -package rest - -import ( - "fmt" - "io" - "net/http" - "net/url" - - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -// An Encoder provides encoding of REST URI path, query, and header components -// of an HTTP request. Can also encode a stream as the payload. -// -// Does not support SetFields. -type Encoder struct { - req *http.Request - - path protocol.PathReplace - - query url.Values - header http.Header - - payload io.ReadSeeker - - err error -} - -// NewEncoder creates a new encoder from the passed in request. All query and -// header values will be added on top of the request's existing values. Overwriting -// duplicate values. -func NewEncoder(req *http.Request) *Encoder { - e := &Encoder{ - req: req, - - path: protocol.NewPathReplace(req.URL.Path), - query: req.URL.Query(), - header: req.Header, - } - - return e -} - -// Encode will return the request and body if one was set. If the body -// payload was not set the io.ReadSeeker will be nil. -// -// returns any error if one occured while encoding the API's parameters. -func (e *Encoder) Encode() (*http.Request, io.ReadSeeker, error) { - if e.err != nil { - return nil, nil, e.err - } - - e.req.URL.Path, e.req.URL.RawPath = e.path.Encode() - e.req.URL.RawQuery = e.query.Encode() - e.req.Header = e.header - - return e.req, e.payload, nil -} - -// SetValue will set a value to the header, path, query. -// -// If the request's method is GET all BodyTarget values will be written to -// the query string. -func (e *Encoder) SetValue(t protocol.Target, k string, v protocol.ValueMarshaler, meta protocol.Metadata) { - if e.err != nil { - return - } - - var str string - str, e.err = v.MarshalValue() - if e.err != nil { - return - } - - switch t { - case protocol.HeaderTarget: - e.header.Set(k, str) - case protocol.PathTarget: - e.path.ReplaceElement(k, str) - case protocol.QueryTarget: - e.query.Set(k, str) - case protocol.BodyTarget: - if e.req.Method != "GET" { - e.err = fmt.Errorf("body target not supported for rest non-GET methods %s, %s", t, k) - return - } - e.query.Set(k, str) - default: - e.err = fmt.Errorf("unknown SetValue rest encode target, %s, %s", t, k) - } -} - -// SetStream will set the stream to the payload of the request. -func (e *Encoder) SetStream(t protocol.Target, k string, v protocol.StreamMarshaler, meta protocol.Metadata) { - if e.err != nil { - return - } - - switch t { - case protocol.PayloadTarget: - e.payload, e.err = v.MarshalStream() - default: - e.err = fmt.Errorf("unknown SetStream rest encode target, %s, %s", t, k) - } -} - -// List will set the nested list values to the header or query. -func (e *Encoder) List(t protocol.Target, k string, meta protocol.Metadata) protocol.ListEncoder { - if e.err != nil { - return nil - } - - switch t { - case protocol.QueryTarget: - return &protocol.QueryListEncoder{Key: k, Query: e.query} - case protocol.HeaderTarget: - return &protocol.HeaderListEncoder{Key: k, Header: e.header} - default: - e.err = fmt.Errorf("unknown SetList rest encode target, %s, %s", t, k) - return nil - } -} - -// Map will set the nested map values to the header or query. 
-func (e *Encoder) Map(t protocol.Target, k string, meta protocol.Metadata) protocol.MapEncoder { - if e.err != nil { - return nil - } - - switch t { - case protocol.QueryTarget: - return &protocol.QueryMapEncoder{Query: e.query} - case protocol.HeadersTarget: - return &protocol.HeaderMapEncoder{Prefix: k, Header: e.header} - default: - e.err = fmt.Errorf("unknown SetMap rest encode target, %s, %s", t, k) - return nil - } -} - -// SetFields is not supported for REST encoder. -func (e *Encoder) SetFields(t protocol.Target, k string, m protocol.FieldMarshaler, meta protocol.Metadata) { - e.err = fmt.Errorf("rest encoder SetFields not supported, %s, %s", t, k) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. -func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". -func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/unmarshal.go deleted file mode 100644 index a9f14154..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,251 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - request "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. 
-func Unmarshal(r *request.Request) { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) -} - -func unmarshalBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) - } - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) - } - default: - switch payload.Type().String() { - case "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - case "io.ReadSeeker": - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", - "failed to read response body", err) - return - } - payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } -} - -func unmarshalLocationElements(r *request.Request, v reflect.Value) { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) - case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - } - } - if r.Error != nil { - return - } - } -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - case int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { - switch r.Interface().(type) { - case map[string]string: // we only support string map value types - out := map[string]string{} - for k, v := range headers { - k = 
http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { - out[k[len(prefix):]] = v[0] - } - } - r.Set(reflect.ValueOf(out)) - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { - if len(header) == 0 { - return nil - } - } else if v.Kind() == reflect.String { - if len(header) > 0 { - v.SetString(header) - } - return nil - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case string: - v.Set(reflect.ValueOf(header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(f)) - case *time.Time: - t, err := time.Parse(RFC822, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - case time.Time: - t, err := time.Parse(RFC822, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(t)) - case aws.JSONValue: - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - m, err := protocol.DecodeJSONValue(header, escaping) - if err != nil { - return err - } - v.Set(reflect.ValueOf(m)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/restjson/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/restjson/encode.go deleted file mode 100644 index 84c6dc80..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/restjson/encode.go +++ /dev/null @@ -1,165 +0,0 @@ -package restjson - -import ( - "fmt" - "io" - "net/http" - - "github.com/aws/aws-sdk-go-v2/private/protocol" - "github.com/aws/aws-sdk-go-v2/private/protocol/json" - "github.com/aws/aws-sdk-go-v2/private/protocol/rest" -) - -// An Encoder provides encoding of the AWS RESTJSON protocol. This encoder combindes -// the JSON and REST encoders deligating to them for their associated targets. -// -// It is invalid to set a JSON and stream payload on the same encoder. -type Encoder struct { - method string - reqEncoder *rest.Encoder - bodyEncoder *json.Encoder - t protocol.Target - - err error -} - -// NewEncoder creates a new encoder for encoding the AWS RESTJSON protocol. -// The request passed in will be the base the path, query, and headers encoded -// will be set on top of. -func NewEncoder(req *http.Request) *Encoder { - e := &Encoder{ - method: req.Method, - reqEncoder: rest.NewEncoder(req), - bodyEncoder: json.NewEncoder(), - } - - return e -} - -// Encode returns the encoded request, and body payload. 
If no payload body was -// set nil will be returned. If an error occurred while encoding the API an -// error will be returned. -func (e *Encoder) Encode() (*http.Request, io.ReadSeeker, error) { - req, payloadBody, err := e.reqEncoder.Encode() - if err != nil { - return nil, nil, err - } - - jsonBody, err := e.bodyEncoder.Encode() - if err != nil { - return nil, nil, err - } - - havePayload := payloadBody != nil - haveJSON := jsonBody != nil - - if havePayload == haveJSON && haveJSON { - return nil, nil, fmt.Errorf("unexpected JSON body and request payload for AWSMarshaler") - } - - body := payloadBody - if body == nil { - body = jsonBody - } - - return req, body, err -} - -// SetValue will set a value to the header, path, query, or body. -// -// If the request's method is GET all BodyTarget values will be written to -// the query string. -func (e *Encoder) SetValue(t protocol.Target, k string, v protocol.ValueMarshaler, meta protocol.Metadata) { - if e.err != nil { - return - } - - switch t { - case protocol.PathTarget: - fallthrough - case protocol.QueryTarget: - fallthrough - case protocol.HeaderTarget: - e.reqEncoder.SetValue(t, k, v, meta) - case protocol.BodyTarget: - fallthrough - case protocol.PayloadTarget: - if e.method == "GET" { - e.reqEncoder.SetValue(t, k, v, meta) - } else { - e.bodyEncoder.SetValue(t, k, v, meta) - } - default: - e.err = fmt.Errorf("unknown SetValue restjson encode target, %s, %s", t, k) - } -} - -// SetStream will set the stream to the payload of the request. -func (e *Encoder) SetStream(t protocol.Target, k string, v protocol.StreamMarshaler, meta protocol.Metadata) { - if e.err != nil { - return - } - - switch t { - case protocol.PayloadTarget: - e.reqEncoder.SetStream(t, k, v, meta) - default: - e.err = fmt.Errorf("invalid target %s, for SetStream, must be PayloadTarget", t) - } -} - -// List will return the proper list encoder based on the given protocol.Target. -func (e *Encoder) List(t protocol.Target, k string, meta protocol.Metadata) protocol.ListEncoder { - if e.err != nil { - return nil - } - e.t = t - - switch t { - case protocol.HeaderTarget: - fallthrough - case protocol.QueryTarget: - return e.reqEncoder.List(t, k, meta) - case protocol.BodyTarget: - return e.bodyEncoder.List(t, k, meta) - default: - e.err = fmt.Errorf("unknown SetList restjson encode target, %s, %s", t, k) - return nil - } -} - -// Map will return the proper map encoder based on the given protocol Target. -func (e *Encoder) Map(t protocol.Target, k string, meta protocol.Metadata) protocol.MapEncoder { - if e.err != nil { - return nil - } - - e.t = t - switch t { - case protocol.QueryTarget: - fallthrough - case protocol.HeadersTarget: - return e.reqEncoder.Map(t, k, meta) - case protocol.BodyTarget: - return e.bodyEncoder.Map(t, k, meta) - default: - e.err = fmt.Errorf("unknown SetMap restjson encode target, %s, %s", t, k) - return nil - } -} - -// SetFields will set the nested type's fields to the body. 
-func (e *Encoder) SetFields(t protocol.Target, k string, m protocol.FieldMarshaler, meta protocol.Metadata) { - if e.err != nil { - return - } - - switch t { - case protocol.PayloadTarget: - fallthrough - case protocol.BodyTarget: - e.bodyEncoder.SetFields(t, k, m, meta) - default: - e.err = fmt.Errorf("unknown SetMarshaler restjson encode target, %s, %s", t, k) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/restjson/restjson.go deleted file mode 100644 index 2e7aff23..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/restjson/restjson.go +++ /dev/null @@ -1,112 +0,0 @@ -// Package restjson provides RESTful JSON serialization of AWS -// requests and responses. -package restjson - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-json.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go - -import ( - "encoding/json" - "io" - "io/ioutil" - "strings" - - request "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/awserr" - "github.com/aws/aws-sdk-go-v2/private/protocol" - "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go-v2/private/protocol/rest" -) - -// BuildHandler is a named request handler for building restjson protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.restjson.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling restjson protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restjson.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling restjson protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling restjson protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalError", Fn: UnmarshalError} - -// Build builds a request for the REST JSON protocol. -func Build(r *request.Request) { - if m, ok := r.Params.(protocol.FieldMarshaler); ok { - e := NewEncoder(r.HTTPRequest) - - m.MarshalFields(e) - - var body io.ReadSeeker - var err error - r.HTTPRequest, body, err = e.Encode() - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to encode rest JSON request", err) - return - } - if body != nil { - r.SetReaderBody(body) - } - return - } - - rest.Build(r) - - if t := rest.PayloadType(r.Params); t == "structure" || t == "" { - jsonrpc.Build(r) - } -} - -// Unmarshal unmarshals a response body for the REST JSON protocol. -func Unmarshal(r *request.Request) { - if t := rest.PayloadType(r.Data); t == "structure" || t == "" { - jsonrpc.Unmarshal(r) - } else { - rest.Unmarshal(r) - } -} - -// UnmarshalMeta unmarshals response headers for the REST JSON protocol. -func UnmarshalMeta(r *request.Request) { - rest.UnmarshalMeta(r) -} - -// UnmarshalError unmarshals a response error for the REST JSON protocol. 
-func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - code := r.HTTPResponse.Header.Get("X-Amzn-Errortype") - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed reading REST JSON error response", err) - return - } - if len(bodyBytes) == 0 { - r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", r.HTTPResponse.Status, nil), - r.HTTPResponse.StatusCode, - "", - ) - return - } - var jsonErr jsonErrorResponse - if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { - r.Error = awserr.New("SerializationError", "failed decoding REST JSON error response", err) - return - } - - if code == "" { - code = jsonErr.Code - } - - code = strings.SplitN(code, ":", 2)[0] - r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) -} - -type jsonErrorResponse struct { - Code string `json:"code"` - Message string `json:"message"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/target.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/target.go deleted file mode 100644 index 36e65d32..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/target.go +++ /dev/null @@ -1,38 +0,0 @@ -package protocol - -import "fmt" - -// Target is the encode and decode targets of protocol marshaling. -type Target int - -// The protocol marshaling targets. -const ( - PathTarget Target = iota - QueryTarget - HeaderTarget - HeadersTarget - StatusCodeTarget - BodyTarget - PayloadTarget -) - -func (e Target) String() string { - switch e { - case PathTarget: - return "Path" - case QueryTarget: - return "Query" - case HeaderTarget: - return "Header" - case HeadersTarget: - return "Headers" - case StatusCodeTarget: - return "StatusCode" - case BodyTarget: - return "Body" - case PayloadTarget: - return "Payload" - default: - panic(fmt.Sprintf("// unknown encoding target, %d", e)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/unmarshal.go deleted file mode 100644 index de048348..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/unmarshal.go +++ /dev/null @@ -1,21 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - request "github.com/aws/aws-sdk-go-v2/aws" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. -func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/value.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/value.go deleted file mode 100644 index 892b286e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/value.go +++ /dev/null @@ -1,30 +0,0 @@ -package protocol - -import ( - "reflect" -) - -// ErrValueNotSet is an error that is returned when the value -// has not been set. 
-type ErrValueNotSet struct{} - -func (err *ErrValueNotSet) Error() string { - return "value not set" -} - -// IsNotSetError will return true if the error is of ErrValueNotSet -func IsNotSetError(err error) bool { - _, ok := err.(*ErrValueNotSet) - return ok -} - -// GetValue will return the value that is associated with the reflect.Value. -// If that value is not set, this will return an ErrValueNotSet -func GetValue(r reflect.Value) (string, error) { - val := r.String() - if len(val) == 0 { - return "", &ErrValueNotSet{} - } - - return val, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/build.go deleted file mode 100644 index ab4f1e4e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/build.go +++ /dev/null @@ -1,302 +0,0 @@ -// Package xmlutil provides XML serialization of AWS requests and responses. -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go-v2/private/protocol" -) - -// BuildXML will serialize params into an xml.Encoder. -// Error will be returned if the serialization of any of the params or nested values fails. -func BuildXML(params interface{}, e *xml.Encoder) error { - b := xmlBuilder{encoder: e, namespaces: map[string]string{}} - root := NewXMLElement(xml.Name{}) - if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { - return err - } - for _, c := range root.Children { - for _, v := range c { - return StructToXML(e, v, false) - } - } - return nil -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -// A xmlBuilder serializes values from Go code to XML -type xmlBuilder struct { - encoder *xml.Encoder - namespaces map[string]string -} - -// buildValue generic XMLNode builder for any type. Will build value for their specific type -// struct, list, map, scalar. -// -// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If -// type is not provided reflect will be used to determine the value's type. -func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - value = elemOf(value) - if !value.IsValid() { // no need to handle zero values - return nil - } else if tag.Get("location") != "" { // don't handle non-body location values - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := value.Type().FieldByName("_"); ok { - tag = tag + reflect.StructTag(" ") + field.Tag - } - return b.buildStruct(value, current, tag) - case "list": - return b.buildList(value, current, tag) - case "map": - return b.buildMap(value, current, tag) - default: - return b.buildScalar(value, current, tag) - } -} - -// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested -// types are converted to XMLNodes also. 
-func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - fieldAdded := false - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - - // there is an xmlNamespace associated with this struct - if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { - ns := xml.Attr{ - Name: xml.Name{Local: "xmlns"}, - Value: uri, - } - if prefix != "" { - b.namespaces[prefix] = uri // register the namespace - ns.Name.Local = "xmlns:" + prefix - } - - child.Attr = append(child.Attr, ns) - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - member := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(token) - } - - memberName := mTag.Get("locationName") - if memberName == "" { - memberName = field.Name - mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) - } - if err := b.buildValue(member, child, mTag); err != nil { - return err - } - - fieldAdded = true - } - - if fieldAdded { // only append this child if we have one ore more valid members - current.AddChild(child) - } - - return nil -} - -// buildList adds the value's list items to the current XMLNode as children nodes. All -// nested values in the list are converted to XMLNodes also. -func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. 
-// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as a attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. -func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) - default: - if value.Kind() != reflect.String { - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - str = value.Convert(reflect.TypeOf("")).String() - } - - if len(str) == 0 { - return nil - } - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 2b95c35e..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,278 +0,0 @@ -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" -) - -// UnmarshalXML deserializes an xml.Decoder into the container v. V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. 
-func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, err := XMLToStruct(d, nil) - if err != nil { - return err - } - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err = parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - // also it can't be a time object - _, tok := r.Interface().(*time.Time) - if _, ok := r.Interface().(time.Time); !(ok || tok) { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := r.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. -func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - if r.Kind() == reflect.Ptr { - if r.IsNil() { // create the structure if it's nil - s := reflect.New(r.Type().Elem()) - r.Set(s) - r = s - } - - r = r.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return parseStruct(r.FieldByName(payload), node, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if c := field.Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - // try to find the field by name in elements - elems := node.Children[name] - - if elems == nil { // try to find the field in attributes - if val, ok := node.findElem(name); ok { - elems = []*XMLNode{{Text: val}} - } - } - - member := r.FieldByName(field.Name) - for _, elem := range elems { - err := parse(member, elem, field.Tag) - if err != nil { - return err - } - } - } - return nil -} - -// parseList deserializes a list of values from an XML node. Each list entry -// will also be deserialized. 
-func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - - if tag.Get("flattened") == "" { // look at all item entries - mname := "member" - if name := tag.Get("locationNameList"); name != "" { - mname = name - } - - if Children, ok := node.Children[mname]; ok { - if r.IsNil() { - r.Set(reflect.MakeSlice(t, len(Children), len(Children))) - } - - for i, c := range Children { - err := parse(r.Index(i), c, "") - if err != nil { - return err - } - } - } - } else { // flattened list means this is a single element - if r.IsNil() { - r.Set(reflect.MakeSlice(t, 0, 0)) - } - - childR := reflect.Zero(t.Elem()) - r.Set(reflect.Append(r, childR)) - err := parse(r.Index(r.Len()-1), node, "") - if err != nil { - return err - } - } - - return nil -} - -// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode -// will also be deserialized as map entries. -func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.IsNil() { - r.Set(reflect.MakeMap(r.Type())) - } - - if tag.Get("flattened") == "" { // look at all child entries - for _, entry := range node.Children["entry"] { - parseMapEntry(r, entry, tag) - } - } else { // this element is itself an entry - parseMapEntry(r, node, tag) - } - - return nil -} - -// parseMapEntry deserializes a map entry from a XML node. -func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - keys, ok := node.Children[kname] - values := node.Children[vname] - if ok { - for i, key := range keys { - keyR := reflect.ValueOf(key.Text) - value := values[i] - valueR := reflect.New(r.Type().Elem()).Elem() - - parse(valueR, value, "") - r.SetMapIndex(keyR, valueR) - } - } - return nil -} - -// parseScaller deserializes an XMLNode value into a concrete type based on the -// interface type of r. -// -// Error is returned if the deserialization fails due to invalid type conversion, -// or unsupported interface type. 
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.Kind() == reflect.String { - r.SetString(node.Text) - return nil - } - - if r.Kind() == reflect.Ptr { - if r.IsNil() { - r.Set(reflect.New(r.Type().Elem())) - } - r = r.Elem() - } - - switch r.Interface().(type) { - case string: - r.Set(reflect.ValueOf(node.Text)) - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(v)) - case int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(v)) - case float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(v)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index 3e970b62..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,147 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "fmt" - "io" - "sort" -) - -// A XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` - - namespaces map[string]string - parent *XMLNode -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } else { - return out, err - } - } - - if tok == nil { - break - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - out.findNamespaces() - if e != nil { - return out, e - } - node.Name = typed.Name - node.findNamespaces() - tempOut := *out - // Save into a temp variable, simply because out gets squashed during - // loop iterations - node.parent = &tempOut - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - out = &XMLNode{} - } - } - return out, nil -} - -func (n *XMLNode) findNamespaces() { - ns := map[string]string{} - for _, a := range n.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - } - } - - n.namespaces = ns -} - -func (n *XMLNode) findElem(name string) (string, bool) { - for node := n; node != nil; node = node.parent { - for _, a := range node.Attr { - namespace := a.Name.Space - if v, ok := node.namespaces[namespace]; ok { - namespace = v - } - if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { - return a.Value, true - } - } - } - return "", false -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. -func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) - - if node.Text != "" { - e.EncodeToken(xml.CharData([]byte(node.Text))) - } else if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(xml.EndElement{Name: node.Name}) - return e.Flush() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/api.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/api.go deleted file mode 100644 index 0efb2589..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/api.go +++ /dev/null @@ -1,8446 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package lambda - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/awsutil" - "github.com/aws/aws-sdk-go-v2/private/protocol" - "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" -) - -const opAddLayerVersionPermission = "AddLayerVersionPermission" - -// AddLayerVersionPermissionRequest is a API request type for the AddLayerVersionPermission API operation. -type AddLayerVersionPermissionRequest struct { - *aws.Request - Input *AddLayerVersionPermissionInput - Copy func(*AddLayerVersionPermissionInput) AddLayerVersionPermissionRequest -} - -// Send marshals and sends the AddLayerVersionPermission API request. 
-func (r AddLayerVersionPermissionRequest) Send() (*AddLayerVersionPermissionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*AddLayerVersionPermissionOutput), nil -} - -// AddLayerVersionPermissionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Adds permissions to the resource-based policy of a version of a function -// layer. Use this action to grant layer usage permission to other accounts. -// You can grant permission to a single account, all AWS accounts, or all accounts -// in an organization. -// -// To revoke permission, call RemoveLayerVersionPermission with the statement -// ID that you specified when you added it. -// -// // Example sending a request using the AddLayerVersionPermissionRequest method. -// req := client.AddLayerVersionPermissionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AddLayerVersionPermission -func (c *Lambda) AddLayerVersionPermissionRequest(input *AddLayerVersionPermissionInput) AddLayerVersionPermissionRequest { - op := &aws.Operation{ - Name: opAddLayerVersionPermission, - HTTPMethod: "POST", - HTTPPath: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - } - - if input == nil { - input = &AddLayerVersionPermissionInput{} - } - - output := &AddLayerVersionPermissionOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return AddLayerVersionPermissionRequest{Request: req, Input: input, Copy: c.AddLayerVersionPermissionRequest} -} - -const opAddPermission = "AddPermission" - -// AddPermissionRequest is a API request type for the AddPermission API operation. -type AddPermissionRequest struct { - *aws.Request - Input *AddPermissionInput - Copy func(*AddPermissionInput) AddPermissionRequest -} - -// Send marshals and sends the AddPermission API request. -func (r AddPermissionRequest) Send() (*AddPermissionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*AddPermissionOutput), nil -} - -// AddPermissionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Adds a permission to the resource policy associated with the specified AWS -// Lambda function. You use resource policies to grant permissions to event -// sources that use the push model. In a push model, event sources (such as -// Amazon S3 and custom applications) invoke your Lambda function. Each permission -// you add to the resource policy allows an event source permission to invoke -// the Lambda function. -// -// Permissions apply to the Amazon Resource Name (ARN) used to invoke the function, -// which can be unqualified (the unpublished version of the function), or include -// a version or alias. If a client uses a version or alias to invoke a function, -// use the Qualifier parameter to apply permissions to that ARN. For more information -// about versioning, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// This operation requires permission for the lambda:AddPermission action. -// -// // Example sending a request using the AddPermissionRequest method. 
-// req := client.AddPermissionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AddPermission -func (c *Lambda) AddPermissionRequest(input *AddPermissionInput) AddPermissionRequest { - op := &aws.Operation{ - Name: opAddPermission, - HTTPMethod: "POST", - HTTPPath: "/2015-03-31/functions/{FunctionName}/policy", - } - - if input == nil { - input = &AddPermissionInput{} - } - - output := &AddPermissionOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return AddPermissionRequest{Request: req, Input: input, Copy: c.AddPermissionRequest} -} - -const opCreateAlias = "CreateAlias" - -// CreateAliasRequest is a API request type for the CreateAlias API operation. -type CreateAliasRequest struct { - *aws.Request - Input *CreateAliasInput - Copy func(*CreateAliasInput) CreateAliasRequest -} - -// Send marshals and sends the CreateAlias API request. -func (r CreateAliasRequest) Send() (*UpdateAliasOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateAliasOutput), nil -} - -// CreateAliasRequest returns a request value for making API operation for -// AWS Lambda. -// -// Creates an alias that points to the specified Lambda function version. For -// more information, see Introduction to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). -// -// Alias names are unique for a given function. This requires permission for -// the lambda:CreateAlias action. -// -// // Example sending a request using the CreateAliasRequest method. -// req := client.CreateAliasRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/CreateAlias -func (c *Lambda) CreateAliasRequest(input *CreateAliasInput) CreateAliasRequest { - op := &aws.Operation{ - Name: opCreateAlias, - HTTPMethod: "POST", - HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases", - } - - if input == nil { - input = &CreateAliasInput{} - } - - output := &UpdateAliasOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return CreateAliasRequest{Request: req, Input: input, Copy: c.CreateAliasRequest} -} - -const opCreateEventSourceMapping = "CreateEventSourceMapping" - -// CreateEventSourceMappingRequest is a API request type for the CreateEventSourceMapping API operation. -type CreateEventSourceMappingRequest struct { - *aws.Request - Input *CreateEventSourceMappingInput - Copy func(*CreateEventSourceMappingInput) CreateEventSourceMappingRequest -} - -// Send marshals and sends the CreateEventSourceMapping API request. -func (r CreateEventSourceMappingRequest) Send() (*UpdateEventSourceMappingOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateEventSourceMappingOutput), nil -} - -// CreateEventSourceMappingRequest returns a request value for making API operation for -// AWS Lambda. -// -// Creates a mapping between an event source and an AWS Lambda function. Lambda -// reads items from the event source and triggers the function. -// -// For details about each event source type, see the following topics. 
-// -// * Using AWS Lambda with Amazon Kinesis (http://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) -// -// * Using AWS Lambda with Amazon SQS (http://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) -// -// * Using AWS Lambda with Amazon DynamoDB (http://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) -// -// // Example sending a request using the CreateEventSourceMappingRequest method. -// req := client.CreateEventSourceMappingRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/CreateEventSourceMapping -func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMappingInput) CreateEventSourceMappingRequest { - op := &aws.Operation{ - Name: opCreateEventSourceMapping, - HTTPMethod: "POST", - HTTPPath: "/2015-03-31/event-source-mappings/", - } - - if input == nil { - input = &CreateEventSourceMappingInput{} - } - - output := &UpdateEventSourceMappingOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return CreateEventSourceMappingRequest{Request: req, Input: input, Copy: c.CreateEventSourceMappingRequest} -} - -const opCreateFunction = "CreateFunction" - -// CreateFunctionRequest is a API request type for the CreateFunction API operation. -type CreateFunctionRequest struct { - *aws.Request - Input *CreateFunctionInput - Copy func(*CreateFunctionInput) CreateFunctionRequest -} - -// Send marshals and sends the CreateFunction API request. -func (r CreateFunctionRequest) Send() (*UpdateFunctionConfigurationOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateFunctionConfigurationOutput), nil -} - -// CreateFunctionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Creates a new Lambda function. The function configuration is created from -// the request parameters, and the code for the function is provided by a .zip -// file. The function name is case-sensitive. -// -// This operation requires permission for the lambda:CreateFunction action. -// -// // Example sending a request using the CreateFunctionRequest method. -// req := client.CreateFunctionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/CreateFunction -func (c *Lambda) CreateFunctionRequest(input *CreateFunctionInput) CreateFunctionRequest { - op := &aws.Operation{ - Name: opCreateFunction, - HTTPMethod: "POST", - HTTPPath: "/2015-03-31/functions", - } - - if input == nil { - input = &CreateFunctionInput{} - } - - output := &UpdateFunctionConfigurationOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return CreateFunctionRequest{Request: req, Input: input, Copy: c.CreateFunctionRequest} -} - -const opDeleteAlias = "DeleteAlias" - -// DeleteAliasRequest is a API request type for the DeleteAlias API operation. -type DeleteAliasRequest struct { - *aws.Request - Input *DeleteAliasInput - Copy func(*DeleteAliasInput) DeleteAliasRequest -} - -// Send marshals and sends the DeleteAlias API request. 
-func (r DeleteAliasRequest) Send() (*DeleteAliasOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*DeleteAliasOutput), nil -} - -// DeleteAliasRequest returns a request value for making API operation for -// AWS Lambda. -// -// Deletes the specified Lambda function alias. For more information, see Introduction -// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). -// -// This requires permission for the lambda:DeleteAlias action. -// -// // Example sending a request using the DeleteAliasRequest method. -// req := client.DeleteAliasRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteAlias -func (c *Lambda) DeleteAliasRequest(input *DeleteAliasInput) DeleteAliasRequest { - op := &aws.Operation{ - Name: opDeleteAlias, - HTTPMethod: "DELETE", - HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", - } - - if input == nil { - input = &DeleteAliasInput{} - } - - output := &DeleteAliasOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return DeleteAliasRequest{Request: req, Input: input, Copy: c.DeleteAliasRequest} -} - -const opDeleteEventSourceMapping = "DeleteEventSourceMapping" - -// DeleteEventSourceMappingRequest is a API request type for the DeleteEventSourceMapping API operation. -type DeleteEventSourceMappingRequest struct { - *aws.Request - Input *DeleteEventSourceMappingInput - Copy func(*DeleteEventSourceMappingInput) DeleteEventSourceMappingRequest -} - -// Send marshals and sends the DeleteEventSourceMapping API request. -func (r DeleteEventSourceMappingRequest) Send() (*UpdateEventSourceMappingOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateEventSourceMappingOutput), nil -} - -// DeleteEventSourceMappingRequest returns a request value for making API operation for -// AWS Lambda. -// -// Deletes an event source mapping. -// -// // Example sending a request using the DeleteEventSourceMappingRequest method. -// req := client.DeleteEventSourceMappingRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteEventSourceMapping -func (c *Lambda) DeleteEventSourceMappingRequest(input *DeleteEventSourceMappingInput) DeleteEventSourceMappingRequest { - op := &aws.Operation{ - Name: opDeleteEventSourceMapping, - HTTPMethod: "DELETE", - HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", - } - - if input == nil { - input = &DeleteEventSourceMappingInput{} - } - - output := &UpdateEventSourceMappingOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return DeleteEventSourceMappingRequest{Request: req, Input: input, Copy: c.DeleteEventSourceMappingRequest} -} - -const opDeleteFunction = "DeleteFunction" - -// DeleteFunctionRequest is a API request type for the DeleteFunction API operation. -type DeleteFunctionRequest struct { - *aws.Request - Input *DeleteFunctionInput - Copy func(*DeleteFunctionInput) DeleteFunctionRequest -} - -// Send marshals and sends the DeleteFunction API request. 
-func (r DeleteFunctionRequest) Send() (*DeleteFunctionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*DeleteFunctionOutput), nil -} - -// DeleteFunctionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Deletes a Lambda function. To delete a specific function version, use the -// Qualifier parameter. Otherwise, all versions and aliases are deleted. Event -// source mappings are not deleted. -// -// This operation requires permission for the lambda:DeleteFunction action. -// -// // Example sending a request using the DeleteFunctionRequest method. -// req := client.DeleteFunctionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteFunction -func (c *Lambda) DeleteFunctionRequest(input *DeleteFunctionInput) DeleteFunctionRequest { - op := &aws.Operation{ - Name: opDeleteFunction, - HTTPMethod: "DELETE", - HTTPPath: "/2015-03-31/functions/{FunctionName}", - } - - if input == nil { - input = &DeleteFunctionInput{} - } - - output := &DeleteFunctionOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return DeleteFunctionRequest{Request: req, Input: input, Copy: c.DeleteFunctionRequest} -} - -const opDeleteFunctionConcurrency = "DeleteFunctionConcurrency" - -// DeleteFunctionConcurrencyRequest is a API request type for the DeleteFunctionConcurrency API operation. -type DeleteFunctionConcurrencyRequest struct { - *aws.Request - Input *DeleteFunctionConcurrencyInput - Copy func(*DeleteFunctionConcurrencyInput) DeleteFunctionConcurrencyRequest -} - -// Send marshals and sends the DeleteFunctionConcurrency API request. -func (r DeleteFunctionConcurrencyRequest) Send() (*DeleteFunctionConcurrencyOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*DeleteFunctionConcurrencyOutput), nil -} - -// DeleteFunctionConcurrencyRequest returns a request value for making API operation for -// AWS Lambda. -// -// Removes concurrent execution limits from this function. For more information, -// see Managing Concurrency (http://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html). -// -// // Example sending a request using the DeleteFunctionConcurrencyRequest method. 
-// req := client.DeleteFunctionConcurrencyRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteFunctionConcurrency -func (c *Lambda) DeleteFunctionConcurrencyRequest(input *DeleteFunctionConcurrencyInput) DeleteFunctionConcurrencyRequest { - op := &aws.Operation{ - Name: opDeleteFunctionConcurrency, - HTTPMethod: "DELETE", - HTTPPath: "/2017-10-31/functions/{FunctionName}/concurrency", - } - - if input == nil { - input = &DeleteFunctionConcurrencyInput{} - } - - output := &DeleteFunctionConcurrencyOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return DeleteFunctionConcurrencyRequest{Request: req, Input: input, Copy: c.DeleteFunctionConcurrencyRequest} -} - -const opDeleteLayerVersion = "DeleteLayerVersion" - -// DeleteLayerVersionRequest is a API request type for the DeleteLayerVersion API operation. -type DeleteLayerVersionRequest struct { - *aws.Request - Input *DeleteLayerVersionInput - Copy func(*DeleteLayerVersionInput) DeleteLayerVersionRequest -} - -// Send marshals and sends the DeleteLayerVersion API request. -func (r DeleteLayerVersionRequest) Send() (*DeleteLayerVersionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*DeleteLayerVersionOutput), nil -} - -// DeleteLayerVersionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Deletes a version of a function layer. Deleted versions can no longer be -// viewed or added to functions. However, a copy of the version remains in Lambda -// until no functions refer to it. -// -// // Example sending a request using the DeleteLayerVersionRequest method. -// req := client.DeleteLayerVersionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteLayerVersion -func (c *Lambda) DeleteLayerVersionRequest(input *DeleteLayerVersionInput) DeleteLayerVersionRequest { - op := &aws.Operation{ - Name: opDeleteLayerVersion, - HTTPMethod: "DELETE", - HTTPPath: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - } - - if input == nil { - input = &DeleteLayerVersionInput{} - } - - output := &DeleteLayerVersionOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return DeleteLayerVersionRequest{Request: req, Input: input, Copy: c.DeleteLayerVersionRequest} -} - -const opGetAccountSettings = "GetAccountSettings" - -// GetAccountSettingsRequest is a API request type for the GetAccountSettings API operation. -type GetAccountSettingsRequest struct { - *aws.Request - Input *GetAccountSettingsInput - Copy func(*GetAccountSettingsInput) GetAccountSettingsRequest -} - -// Send marshals and sends the GetAccountSettings API request. 
-func (r GetAccountSettingsRequest) Send() (*GetAccountSettingsOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetAccountSettingsOutput), nil -} - -// GetAccountSettingsRequest returns a request value for making API operation for -// AWS Lambda. -// -// Retrieves details about your account's limits (http://docs.aws.amazon.com/lambda/latest/dg/limits.html) -// and usage in a region. -// -// // Example sending a request using the GetAccountSettingsRequest method. -// req := client.GetAccountSettingsRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetAccountSettings -func (c *Lambda) GetAccountSettingsRequest(input *GetAccountSettingsInput) GetAccountSettingsRequest { - op := &aws.Operation{ - Name: opGetAccountSettings, - HTTPMethod: "GET", - HTTPPath: "/2016-08-19/account-settings/", - } - - if input == nil { - input = &GetAccountSettingsInput{} - } - - output := &GetAccountSettingsOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetAccountSettingsRequest{Request: req, Input: input, Copy: c.GetAccountSettingsRequest} -} - -const opGetAlias = "GetAlias" - -// GetAliasRequest is a API request type for the GetAlias API operation. -type GetAliasRequest struct { - *aws.Request - Input *GetAliasInput - Copy func(*GetAliasInput) GetAliasRequest -} - -// Send marshals and sends the GetAlias API request. -func (r GetAliasRequest) Send() (*UpdateAliasOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateAliasOutput), nil -} - -// GetAliasRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns the specified alias information such as the alias ARN, description, -// and function version it is pointing to. For more information, see Introduction -// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). -// -// This requires permission for the lambda:GetAlias action. -// -// // Example sending a request using the GetAliasRequest method. -// req := client.GetAliasRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetAlias -func (c *Lambda) GetAliasRequest(input *GetAliasInput) GetAliasRequest { - op := &aws.Operation{ - Name: opGetAlias, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", - } - - if input == nil { - input = &GetAliasInput{} - } - - output := &UpdateAliasOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetAliasRequest{Request: req, Input: input, Copy: c.GetAliasRequest} -} - -const opGetEventSourceMapping = "GetEventSourceMapping" - -// GetEventSourceMappingRequest is a API request type for the GetEventSourceMapping API operation. -type GetEventSourceMappingRequest struct { - *aws.Request - Input *GetEventSourceMappingInput - Copy func(*GetEventSourceMappingInput) GetEventSourceMappingRequest -} - -// Send marshals and sends the GetEventSourceMapping API request. 
-func (r GetEventSourceMappingRequest) Send() (*UpdateEventSourceMappingOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateEventSourceMappingOutput), nil -} - -// GetEventSourceMappingRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns details about an event source mapping. -// -// // Example sending a request using the GetEventSourceMappingRequest method. -// req := client.GetEventSourceMappingRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetEventSourceMapping -func (c *Lambda) GetEventSourceMappingRequest(input *GetEventSourceMappingInput) GetEventSourceMappingRequest { - op := &aws.Operation{ - Name: opGetEventSourceMapping, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", - } - - if input == nil { - input = &GetEventSourceMappingInput{} - } - - output := &UpdateEventSourceMappingOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetEventSourceMappingRequest{Request: req, Input: input, Copy: c.GetEventSourceMappingRequest} -} - -const opGetFunction = "GetFunction" - -// GetFunctionRequest is a API request type for the GetFunction API operation. -type GetFunctionRequest struct { - *aws.Request - Input *GetFunctionInput - Copy func(*GetFunctionInput) GetFunctionRequest -} - -// Send marshals and sends the GetFunction API request. -func (r GetFunctionRequest) Send() (*GetFunctionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetFunctionOutput), nil -} - -// GetFunctionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns the configuration information of the Lambda function and a presigned -// URL link to the .zip file you uploaded with CreateFunction so you can download -// the .zip file. Note that the URL is valid for up to 10 minutes. The configuration -// information is the same information you provided as parameters when uploading -// the function. -// -// Use the Qualifier parameter to retrieve a published version of the function. -// Otherwise, returns the unpublished version ($LATEST). For more information, -// see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// This operation requires permission for the lambda:GetFunction action. -// -// // Example sending a request using the GetFunctionRequest method. -// req := client.GetFunctionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetFunction -func (c *Lambda) GetFunctionRequest(input *GetFunctionInput) GetFunctionRequest { - op := &aws.Operation{ - Name: opGetFunction, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/functions/{FunctionName}", - } - - if input == nil { - input = &GetFunctionInput{} - } - - output := &GetFunctionOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetFunctionRequest{Request: req, Input: input, Copy: c.GetFunctionRequest} -} - -const opGetFunctionConfiguration = "GetFunctionConfiguration" - -// GetFunctionConfigurationRequest is a API request type for the GetFunctionConfiguration API operation. 
-type GetFunctionConfigurationRequest struct { - *aws.Request - Input *GetFunctionConfigurationInput - Copy func(*GetFunctionConfigurationInput) GetFunctionConfigurationRequest -} - -// Send marshals and sends the GetFunctionConfiguration API request. -func (r GetFunctionConfigurationRequest) Send() (*UpdateFunctionConfigurationOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateFunctionConfigurationOutput), nil -} - -// GetFunctionConfigurationRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns the configuration information of the Lambda function. This the same -// information you provided as parameters when uploading the function by using -// CreateFunction. -// -// If you are using the versioning feature, you can retrieve this information -// for a specific function version by using the optional Qualifier parameter -// and specifying the function version or alias that points to it. If you don't -// provide it, the API returns information about the $LATEST version of the -// function. For more information about versioning, see AWS Lambda Function -// Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// This operation requires permission for the lambda:GetFunctionConfiguration -// operation. -// -// // Example sending a request using the GetFunctionConfigurationRequest method. -// req := client.GetFunctionConfigurationRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetFunctionConfiguration -func (c *Lambda) GetFunctionConfigurationRequest(input *GetFunctionConfigurationInput) GetFunctionConfigurationRequest { - op := &aws.Operation{ - Name: opGetFunctionConfiguration, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/functions/{FunctionName}/configuration", - } - - if input == nil { - input = &GetFunctionConfigurationInput{} - } - - output := &UpdateFunctionConfigurationOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetFunctionConfigurationRequest{Request: req, Input: input, Copy: c.GetFunctionConfigurationRequest} -} - -const opGetLayerVersion = "GetLayerVersion" - -// GetLayerVersionRequest is a API request type for the GetLayerVersion API operation. -type GetLayerVersionRequest struct { - *aws.Request - Input *GetLayerVersionInput - Copy func(*GetLayerVersionInput) GetLayerVersionRequest -} - -// Send marshals and sends the GetLayerVersion API request. -func (r GetLayerVersionRequest) Send() (*GetLayerVersionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetLayerVersionOutput), nil -} - -// GetLayerVersionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns information about a version of a function layer, with a link to download -// the layer archive that's valid for 10 minutes. -// -// // Example sending a request using the GetLayerVersionRequest method. 
-// req := client.GetLayerVersionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetLayerVersion -func (c *Lambda) GetLayerVersionRequest(input *GetLayerVersionInput) GetLayerVersionRequest { - op := &aws.Operation{ - Name: opGetLayerVersion, - HTTPMethod: "GET", - HTTPPath: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - } - - if input == nil { - input = &GetLayerVersionInput{} - } - - output := &GetLayerVersionOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetLayerVersionRequest{Request: req, Input: input, Copy: c.GetLayerVersionRequest} -} - -const opGetLayerVersionPolicy = "GetLayerVersionPolicy" - -// GetLayerVersionPolicyRequest is a API request type for the GetLayerVersionPolicy API operation. -type GetLayerVersionPolicyRequest struct { - *aws.Request - Input *GetLayerVersionPolicyInput - Copy func(*GetLayerVersionPolicyInput) GetLayerVersionPolicyRequest -} - -// Send marshals and sends the GetLayerVersionPolicy API request. -func (r GetLayerVersionPolicyRequest) Send() (*GetLayerVersionPolicyOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetLayerVersionPolicyOutput), nil -} - -// GetLayerVersionPolicyRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns the permission policy for a layer version. For more information, -// see AddLayerVersionPermission. -// -// // Example sending a request using the GetLayerVersionPolicyRequest method. -// req := client.GetLayerVersionPolicyRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetLayerVersionPolicy -func (c *Lambda) GetLayerVersionPolicyRequest(input *GetLayerVersionPolicyInput) GetLayerVersionPolicyRequest { - op := &aws.Operation{ - Name: opGetLayerVersionPolicy, - HTTPMethod: "GET", - HTTPPath: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - } - - if input == nil { - input = &GetLayerVersionPolicyInput{} - } - - output := &GetLayerVersionPolicyOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetLayerVersionPolicyRequest{Request: req, Input: input, Copy: c.GetLayerVersionPolicyRequest} -} - -const opGetPolicy = "GetPolicy" - -// GetPolicyRequest is a API request type for the GetPolicy API operation. -type GetPolicyRequest struct { - *aws.Request - Input *GetPolicyInput - Copy func(*GetPolicyInput) GetPolicyRequest -} - -// Send marshals and sends the GetPolicy API request. -func (r GetPolicyRequest) Send() (*GetPolicyOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetPolicyOutput), nil -} - -// GetPolicyRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns the resource policy associated with the specified Lambda function. -// -// This action requires permission for the lambda:GetPolicy action. -// -// // Example sending a request using the GetPolicyRequest method. 
-// req := client.GetPolicyRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetPolicy -func (c *Lambda) GetPolicyRequest(input *GetPolicyInput) GetPolicyRequest { - op := &aws.Operation{ - Name: opGetPolicy, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/functions/{FunctionName}/policy", - } - - if input == nil { - input = &GetPolicyInput{} - } - - output := &GetPolicyOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetPolicyRequest{Request: req, Input: input, Copy: c.GetPolicyRequest} -} - -const opInvoke = "Invoke" - -// InvokeRequest is a API request type for the Invoke API operation. -type InvokeRequest struct { - *aws.Request - Input *InvokeInput - Copy func(*InvokeInput) InvokeRequest -} - -// Send marshals and sends the Invoke API request. -func (r InvokeRequest) Send() (*InvokeOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*InvokeOutput), nil -} - -// InvokeRequest returns a request value for making API operation for -// AWS Lambda. -// -// Invokes a Lambda function. For an example, see Create the Lambda Function -// and Test It Manually (http://docs.aws.amazon.com/lambda/latest/dg/with-dynamodb-create-function.html#with-dbb-invoke-manually). -// -// Specify just a function name to invoke the latest version of the function. -// To invoke a published version, use the Qualifier parameter to specify a version -// or alias (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// If you use the RequestResponse (synchronous) invocation option, the function -// will be invoked only once. If you use the Event (asynchronous) invocation -// option, the function will be invoked at least once in response to an event -// and the function must be idempotent to handle this. -// -// For functions with a long timeout, your client may be disconnected during -// synchronous invocation while it waits for a response. Configure your HTTP -// client, SDK, firewall, proxy, or operating system to allow for long connections -// with timeout or keep-alive settings. -// -// This operation requires permission for the lambda:InvokeFunction action. -// -// The TooManyRequestsException noted below will return the following: ConcurrentInvocationLimitExceeded -// will be returned if you have no functions with reserved concurrency and have -// exceeded your account concurrent limit or if a function without reserved -// concurrency exceeds the account's unreserved concurrency limit. ReservedFunctionConcurrentInvocationLimitExceeded -// will be returned when a function with reserved concurrency exceeds its configured -// concurrency limit. -// -// // Example sending a request using the InvokeRequest method. 
-// req := client.InvokeRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/Invoke -func (c *Lambda) InvokeRequest(input *InvokeInput) InvokeRequest { - op := &aws.Operation{ - Name: opInvoke, - HTTPMethod: "POST", - HTTPPath: "/2015-03-31/functions/{FunctionName}/invocations", - } - - if input == nil { - input = &InvokeInput{} - } - - output := &InvokeOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return InvokeRequest{Request: req, Input: input, Copy: c.InvokeRequest} -} - -const opInvokeAsync = "InvokeAsync" - -// InvokeAsyncRequest is a API request type for the InvokeAsync API operation. -type InvokeAsyncRequest struct { - *aws.Request - Input *InvokeAsyncInput - Copy func(*InvokeAsyncInput) InvokeAsyncRequest -} - -// Send marshals and sends the InvokeAsync API request. -func (r InvokeAsyncRequest) Send() (*InvokeAsyncOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*InvokeAsyncOutput), nil -} - -// InvokeAsyncRequest returns a request value for making API operation for -// AWS Lambda. -// -// For asynchronous function invocation, use Invoke. -// -// Submits an invocation request to AWS Lambda. Upon receiving the request, -// Lambda executes the specified function asynchronously. To see the logs generated -// by the Lambda function execution, see the CloudWatch Logs console. -// -// This operation requires permission for the lambda:InvokeFunction action. -// -// // Example sending a request using the InvokeAsyncRequest method. -// req := client.InvokeAsyncRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/InvokeAsync -func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) InvokeAsyncRequest { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, InvokeAsync, has been deprecated") - } - op := &aws.Operation{ - Name: opInvokeAsync, - HTTPMethod: "POST", - HTTPPath: "/2014-11-13/functions/{FunctionName}/invoke-async/", - } - - if input == nil { - input = &InvokeAsyncInput{} - } - - output := &InvokeAsyncOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return InvokeAsyncRequest{Request: req, Input: input, Copy: c.InvokeAsyncRequest} -} - -const opListAliases = "ListAliases" - -// ListAliasesRequest is a API request type for the ListAliases API operation. -type ListAliasesRequest struct { - *aws.Request - Input *ListAliasesInput - Copy func(*ListAliasesInput) ListAliasesRequest -} - -// Send marshals and sends the ListAliases API request. -func (r ListAliasesRequest) Send() (*ListAliasesOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*ListAliasesOutput), nil -} - -// ListAliasesRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns list of aliases created for a Lambda function. For each alias, the -// response includes information such as the alias ARN, description, alias name, -// and the function version to which it points. For more information, see Introduction -// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). -// -// This requires permission for the lambda:ListAliases action. 
-// -// // Example sending a request using the ListAliasesRequest method. -// req := client.ListAliasesRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListAliases -func (c *Lambda) ListAliasesRequest(input *ListAliasesInput) ListAliasesRequest { - op := &aws.Operation{ - Name: opListAliases, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases", - } - - if input == nil { - input = &ListAliasesInput{} - } - - output := &ListAliasesOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return ListAliasesRequest{Request: req, Input: input, Copy: c.ListAliasesRequest} -} - -const opListEventSourceMappings = "ListEventSourceMappings" - -// ListEventSourceMappingsRequest is a API request type for the ListEventSourceMappings API operation. -type ListEventSourceMappingsRequest struct { - *aws.Request - Input *ListEventSourceMappingsInput - Copy func(*ListEventSourceMappingsInput) ListEventSourceMappingsRequest -} - -// Send marshals and sends the ListEventSourceMappings API request. -func (r ListEventSourceMappingsRequest) Send() (*ListEventSourceMappingsOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*ListEventSourceMappingsOutput), nil -} - -// ListEventSourceMappingsRequest returns a request value for making API operation for -// AWS Lambda. -// -// Lists event source mappings. Specify an EventSourceArn to only show event -// source mappings for a single event source. -// -// // Example sending a request using the ListEventSourceMappingsRequest method. -// req := client.ListEventSourceMappingsRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListEventSourceMappings -func (c *Lambda) ListEventSourceMappingsRequest(input *ListEventSourceMappingsInput) ListEventSourceMappingsRequest { - op := &aws.Operation{ - Name: opListEventSourceMappings, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/event-source-mappings/", - Paginator: &aws.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "MaxItems", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListEventSourceMappingsInput{} - } - - output := &ListEventSourceMappingsOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return ListEventSourceMappingsRequest{Request: req, Input: input, Copy: c.ListEventSourceMappingsRequest} -} - -// Paginate pages iterates over the pages of a ListEventSourceMappingsRequest operation, -// calling the Next method for each page. Using the paginators Next -// method will depict whether or not there are more pages. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListEventSourceMappings operation. 
-// req := client.ListEventSourceMappingsRequest(input) -// p := req.Paginate() -// for p.Next() { -// page := p.CurrentPage() -// } -// -// if err := p.Err(); err != nil { -// return err -// } -// -func (p *ListEventSourceMappingsRequest) Paginate(opts ...aws.Option) ListEventSourceMappingsPager { - return ListEventSourceMappingsPager{ - Pager: aws.Pager{ - NewRequest: func() (*aws.Request, error) { - var inCpy *ListEventSourceMappingsInput - if p.Input != nil { - tmp := *p.Input - inCpy = &tmp - } - - req := p.Copy(inCpy) - req.ApplyOptions(opts...) - - return req.Request, nil - }, - }, - } -} - -// ListEventSourceMappingsPager is used to paginate the request. This can be done by -// calling Next and CurrentPage. -type ListEventSourceMappingsPager struct { - aws.Pager -} - -func (p *ListEventSourceMappingsPager) CurrentPage() *ListEventSourceMappingsOutput { - return p.Pager.CurrentPage().(*ListEventSourceMappingsOutput) -} - -const opListFunctions = "ListFunctions" - -// ListFunctionsRequest is a API request type for the ListFunctions API operation. -type ListFunctionsRequest struct { - *aws.Request - Input *ListFunctionsInput - Copy func(*ListFunctionsInput) ListFunctionsRequest -} - -// Send marshals and sends the ListFunctions API request. -func (r ListFunctionsRequest) Send() (*ListFunctionsOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*ListFunctionsOutput), nil -} - -// ListFunctionsRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns a list of your Lambda functions. For each function, the response -// includes the function configuration information. You must use GetFunction -// to retrieve the code for your function. -// -// This operation requires permission for the lambda:ListFunctions action. -// -// If you are using the versioning feature, you can list all of your functions -// or only $LATEST versions. For information about the versioning feature, see -// AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// // Example sending a request using the ListFunctionsRequest method. -// req := client.ListFunctionsRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListFunctions -func (c *Lambda) ListFunctionsRequest(input *ListFunctionsInput) ListFunctionsRequest { - op := &aws.Operation{ - Name: opListFunctions, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/functions/", - Paginator: &aws.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "MaxItems", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListFunctionsInput{} - } - - output := &ListFunctionsOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return ListFunctionsRequest{Request: req, Input: input, Copy: c.ListFunctionsRequest} -} - -// Paginate pages iterates over the pages of a ListFunctionsRequest operation, -// calling the Next method for each page. Using the paginators Next -// method will depict whether or not there are more pages. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListFunctions operation. 
-// req := client.ListFunctionsRequest(input) -// p := req.Paginate() -// for p.Next() { -// page := p.CurrentPage() -// } -// -// if err := p.Err(); err != nil { -// return err -// } -// -func (p *ListFunctionsRequest) Paginate(opts ...aws.Option) ListFunctionsPager { - return ListFunctionsPager{ - Pager: aws.Pager{ - NewRequest: func() (*aws.Request, error) { - var inCpy *ListFunctionsInput - if p.Input != nil { - tmp := *p.Input - inCpy = &tmp - } - - req := p.Copy(inCpy) - req.ApplyOptions(opts...) - - return req.Request, nil - }, - }, - } -} - -// ListFunctionsPager is used to paginate the request. This can be done by -// calling Next and CurrentPage. -type ListFunctionsPager struct { - aws.Pager -} - -func (p *ListFunctionsPager) CurrentPage() *ListFunctionsOutput { - return p.Pager.CurrentPage().(*ListFunctionsOutput) -} - -const opListLayerVersions = "ListLayerVersions" - -// ListLayerVersionsRequest is a API request type for the ListLayerVersions API operation. -type ListLayerVersionsRequest struct { - *aws.Request - Input *ListLayerVersionsInput - Copy func(*ListLayerVersionsInput) ListLayerVersionsRequest -} - -// Send marshals and sends the ListLayerVersions API request. -func (r ListLayerVersionsRequest) Send() (*ListLayerVersionsOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*ListLayerVersionsOutput), nil -} - -// ListLayerVersionsRequest returns a request value for making API operation for -// AWS Lambda. -// -// Lists the versions of a function layer. Versions that have been deleted aren't -// listed. Specify a runtime identifier (http://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) -// to list only versions that indicate that they're compatible with that runtime. -// -// // Example sending a request using the ListLayerVersionsRequest method. -// req := client.ListLayerVersionsRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListLayerVersions -func (c *Lambda) ListLayerVersionsRequest(input *ListLayerVersionsInput) ListLayerVersionsRequest { - op := &aws.Operation{ - Name: opListLayerVersions, - HTTPMethod: "GET", - HTTPPath: "/2018-10-31/layers/{LayerName}/versions", - } - - if input == nil { - input = &ListLayerVersionsInput{} - } - - output := &ListLayerVersionsOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return ListLayerVersionsRequest{Request: req, Input: input, Copy: c.ListLayerVersionsRequest} -} - -const opListLayers = "ListLayers" - -// ListLayersRequest is a API request type for the ListLayers API operation. -type ListLayersRequest struct { - *aws.Request - Input *ListLayersInput - Copy func(*ListLayersInput) ListLayersRequest -} - -// Send marshals and sends the ListLayers API request. -func (r ListLayersRequest) Send() (*ListLayersOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*ListLayersOutput), nil -} - -// ListLayersRequest returns a request value for making API operation for -// AWS Lambda. -// -// Lists function layers and shows information about the latest version of each. -// Specify a runtime identifier (http://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) -// to list only layers that indicate that they're compatible with that runtime. -// -// // Example sending a request using the ListLayersRequest method. 
-// req := client.ListLayersRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListLayers -func (c *Lambda) ListLayersRequest(input *ListLayersInput) ListLayersRequest { - op := &aws.Operation{ - Name: opListLayers, - HTTPMethod: "GET", - HTTPPath: "/2018-10-31/layers", - } - - if input == nil { - input = &ListLayersInput{} - } - - output := &ListLayersOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return ListLayersRequest{Request: req, Input: input, Copy: c.ListLayersRequest} -} - -const opListTags = "ListTags" - -// ListTagsRequest is a API request type for the ListTags API operation. -type ListTagsRequest struct { - *aws.Request - Input *ListTagsInput - Copy func(*ListTagsInput) ListTagsRequest -} - -// Send marshals and sends the ListTags API request. -func (r ListTagsRequest) Send() (*ListTagsOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*ListTagsOutput), nil -} - -// ListTagsRequest returns a request value for making API operation for -// AWS Lambda. -// -// Returns a list of tags assigned to a function when supplied the function -// ARN (Amazon Resource Name). For more information on Tagging, see Tagging -// Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) -// in the AWS Lambda Developer Guide. -// -// // Example sending a request using the ListTagsRequest method. -// req := client.ListTagsRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListTags -func (c *Lambda) ListTagsRequest(input *ListTagsInput) ListTagsRequest { - op := &aws.Operation{ - Name: opListTags, - HTTPMethod: "GET", - HTTPPath: "/2017-03-31/tags/{ARN}", - } - - if input == nil { - input = &ListTagsInput{} - } - - output := &ListTagsOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return ListTagsRequest{Request: req, Input: input, Copy: c.ListTagsRequest} -} - -const opListVersionsByFunction = "ListVersionsByFunction" - -// ListVersionsByFunctionRequest is a API request type for the ListVersionsByFunction API operation. -type ListVersionsByFunctionRequest struct { - *aws.Request - Input *ListVersionsByFunctionInput - Copy func(*ListVersionsByFunctionInput) ListVersionsByFunctionRequest -} - -// Send marshals and sends the ListVersionsByFunction API request. -func (r ListVersionsByFunctionRequest) Send() (*ListVersionsByFunctionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*ListVersionsByFunctionOutput), nil -} - -// ListVersionsByFunctionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Lists all versions of a function. For information about versioning, see AWS -// Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// // Example sending a request using the ListVersionsByFunctionRequest method. 
-// req := client.ListVersionsByFunctionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListVersionsByFunction -func (c *Lambda) ListVersionsByFunctionRequest(input *ListVersionsByFunctionInput) ListVersionsByFunctionRequest { - op := &aws.Operation{ - Name: opListVersionsByFunction, - HTTPMethod: "GET", - HTTPPath: "/2015-03-31/functions/{FunctionName}/versions", - } - - if input == nil { - input = &ListVersionsByFunctionInput{} - } - - output := &ListVersionsByFunctionOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return ListVersionsByFunctionRequest{Request: req, Input: input, Copy: c.ListVersionsByFunctionRequest} -} - -const opPublishLayerVersion = "PublishLayerVersion" - -// PublishLayerVersionRequest is a API request type for the PublishLayerVersion API operation. -type PublishLayerVersionRequest struct { - *aws.Request - Input *PublishLayerVersionInput - Copy func(*PublishLayerVersionInput) PublishLayerVersionRequest -} - -// Send marshals and sends the PublishLayerVersion API request. -func (r PublishLayerVersionRequest) Send() (*PublishLayerVersionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*PublishLayerVersionOutput), nil -} - -// PublishLayerVersionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Creates a function layer from a ZIP archive. Each time you call PublishLayerVersion -// with the same version name, a new version is created. -// -// Add layers to your function with CreateFunction or UpdateFunctionConfiguration. -// -// // Example sending a request using the PublishLayerVersionRequest method. -// req := client.PublishLayerVersionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PublishLayerVersion -func (c *Lambda) PublishLayerVersionRequest(input *PublishLayerVersionInput) PublishLayerVersionRequest { - op := &aws.Operation{ - Name: opPublishLayerVersion, - HTTPMethod: "POST", - HTTPPath: "/2018-10-31/layers/{LayerName}/versions", - } - - if input == nil { - input = &PublishLayerVersionInput{} - } - - output := &PublishLayerVersionOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return PublishLayerVersionRequest{Request: req, Input: input, Copy: c.PublishLayerVersionRequest} -} - -const opPublishVersion = "PublishVersion" - -// PublishVersionRequest is a API request type for the PublishVersion API operation. -type PublishVersionRequest struct { - *aws.Request - Input *PublishVersionInput - Copy func(*PublishVersionInput) PublishVersionRequest -} - -// Send marshals and sends the PublishVersion API request. -func (r PublishVersionRequest) Send() (*UpdateFunctionConfigurationOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateFunctionConfigurationOutput), nil -} - -// PublishVersionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Publishes a version of your function from the current snapshot of $LATEST. -// That is, AWS Lambda takes a snapshot of the function code and configuration -// information from $LATEST and publishes a new version. 
The code and configuration -// cannot be modified after publication. For information about the versioning -// feature, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// // Example sending a request using the PublishVersionRequest method. -// req := client.PublishVersionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PublishVersion -func (c *Lambda) PublishVersionRequest(input *PublishVersionInput) PublishVersionRequest { - op := &aws.Operation{ - Name: opPublishVersion, - HTTPMethod: "POST", - HTTPPath: "/2015-03-31/functions/{FunctionName}/versions", - } - - if input == nil { - input = &PublishVersionInput{} - } - - output := &UpdateFunctionConfigurationOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return PublishVersionRequest{Request: req, Input: input, Copy: c.PublishVersionRequest} -} - -const opPutFunctionConcurrency = "PutFunctionConcurrency" - -// PutFunctionConcurrencyRequest is a API request type for the PutFunctionConcurrency API operation. -type PutFunctionConcurrencyRequest struct { - *aws.Request - Input *PutFunctionConcurrencyInput - Copy func(*PutFunctionConcurrencyInput) PutFunctionConcurrencyRequest -} - -// Send marshals and sends the PutFunctionConcurrency API request. -func (r PutFunctionConcurrencyRequest) Send() (*PutFunctionConcurrencyOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*PutFunctionConcurrencyOutput), nil -} - -// PutFunctionConcurrencyRequest returns a request value for making API operation for -// AWS Lambda. -// -// Sets a limit on the number of concurrent executions available to this function. -// It is a subset of your account's total concurrent execution limit per region. -// Note that Lambda automatically reserves a buffer of 100 concurrent executions -// for functions without any reserved concurrency limit. This means if your -// account limit is 1000, you have a total of 900 available to allocate to individual -// functions. For more information, see Managing Concurrency (http://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html). -// -// // Example sending a request using the PutFunctionConcurrencyRequest method. -// req := client.PutFunctionConcurrencyRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PutFunctionConcurrency -func (c *Lambda) PutFunctionConcurrencyRequest(input *PutFunctionConcurrencyInput) PutFunctionConcurrencyRequest { - op := &aws.Operation{ - Name: opPutFunctionConcurrency, - HTTPMethod: "PUT", - HTTPPath: "/2017-10-31/functions/{FunctionName}/concurrency", - } - - if input == nil { - input = &PutFunctionConcurrencyInput{} - } - - output := &PutFunctionConcurrencyOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return PutFunctionConcurrencyRequest{Request: req, Input: input, Copy: c.PutFunctionConcurrencyRequest} -} - -const opRemoveLayerVersionPermission = "RemoveLayerVersionPermission" - -// RemoveLayerVersionPermissionRequest is a API request type for the RemoveLayerVersionPermission API operation. 
-type RemoveLayerVersionPermissionRequest struct { - *aws.Request - Input *RemoveLayerVersionPermissionInput - Copy func(*RemoveLayerVersionPermissionInput) RemoveLayerVersionPermissionRequest -} - -// Send marshals and sends the RemoveLayerVersionPermission API request. -func (r RemoveLayerVersionPermissionRequest) Send() (*RemoveLayerVersionPermissionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*RemoveLayerVersionPermissionOutput), nil -} - -// RemoveLayerVersionPermissionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Removes a statement from the permissions policy for a layer version. For -// more information, see AddLayerVersionPermission. -// -// // Example sending a request using the RemoveLayerVersionPermissionRequest method. -// req := client.RemoveLayerVersionPermissionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/RemoveLayerVersionPermission -func (c *Lambda) RemoveLayerVersionPermissionRequest(input *RemoveLayerVersionPermissionInput) RemoveLayerVersionPermissionRequest { - op := &aws.Operation{ - Name: opRemoveLayerVersionPermission, - HTTPMethod: "DELETE", - HTTPPath: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/{StatementId}", - } - - if input == nil { - input = &RemoveLayerVersionPermissionInput{} - } - - output := &RemoveLayerVersionPermissionOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return RemoveLayerVersionPermissionRequest{Request: req, Input: input, Copy: c.RemoveLayerVersionPermissionRequest} -} - -const opRemovePermission = "RemovePermission" - -// RemovePermissionRequest is a API request type for the RemovePermission API operation. -type RemovePermissionRequest struct { - *aws.Request - Input *RemovePermissionInput - Copy func(*RemovePermissionInput) RemovePermissionRequest -} - -// Send marshals and sends the RemovePermission API request. -func (r RemovePermissionRequest) Send() (*RemovePermissionOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*RemovePermissionOutput), nil -} - -// RemovePermissionRequest returns a request value for making API operation for -// AWS Lambda. -// -// Removes permissions from a function. You can remove individual permissions -// from an resource policy associated with a Lambda function by providing a -// statement ID that you provided when you added the permission. When you remove -// permissions, disable the event source mapping or trigger configuration first -// to avoid errors. -// -// Permissions apply to the Amazon Resource Name (ARN) used to invoke the function, -// which can be unqualified (the unpublished version of the function), or include -// a version or alias. If a client uses a version or alias to invoke a function, -// use the Qualifier parameter to apply permissions to that ARN. For more information -// about versioning, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// You need permission for the lambda:RemovePermission action. -// -// // Example sending a request using the RemovePermissionRequest method. 
-// req := client.RemovePermissionRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/RemovePermission -func (c *Lambda) RemovePermissionRequest(input *RemovePermissionInput) RemovePermissionRequest { - op := &aws.Operation{ - Name: opRemovePermission, - HTTPMethod: "DELETE", - HTTPPath: "/2015-03-31/functions/{FunctionName}/policy/{StatementId}", - } - - if input == nil { - input = &RemovePermissionInput{} - } - - output := &RemovePermissionOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return RemovePermissionRequest{Request: req, Input: input, Copy: c.RemovePermissionRequest} -} - -const opTagResource = "TagResource" - -// TagResourceRequest is a API request type for the TagResource API operation. -type TagResourceRequest struct { - *aws.Request - Input *TagResourceInput - Copy func(*TagResourceInput) TagResourceRequest -} - -// Send marshals and sends the TagResource API request. -func (r TagResourceRequest) Send() (*TagResourceOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*TagResourceOutput), nil -} - -// TagResourceRequest returns a request value for making API operation for -// AWS Lambda. -// -// Creates a list of tags (key-value pairs) on the Lambda function. Requires -// the Lambda function ARN (Amazon Resource Name). If a key is specified without -// a value, Lambda creates a tag with the specified key and a value of null. -// For more information, see Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) -// in the AWS Lambda Developer Guide. -// -// // Example sending a request using the TagResourceRequest method. -// req := client.TagResourceRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/TagResource -func (c *Lambda) TagResourceRequest(input *TagResourceInput) TagResourceRequest { - op := &aws.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/2017-03-31/tags/{ARN}", - } - - if input == nil { - input = &TagResourceInput{} - } - - output := &TagResourceOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest} -} - -const opUntagResource = "UntagResource" - -// UntagResourceRequest is a API request type for the UntagResource API operation. -type UntagResourceRequest struct { - *aws.Request - Input *UntagResourceInput - Copy func(*UntagResourceInput) UntagResourceRequest -} - -// Send marshals and sends the UntagResource API request. -func (r UntagResourceRequest) Send() (*UntagResourceOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UntagResourceOutput), nil -} - -// UntagResourceRequest returns a request value for making API operation for -// AWS Lambda. -// -// Removes tags from a Lambda function. Requires the function ARN (Amazon Resource -// Name). 
For more information, see Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) -// in the AWS Lambda Developer Guide. -// -// // Example sending a request using the UntagResourceRequest method. -// req := client.UntagResourceRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UntagResource -func (c *Lambda) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest { - op := &aws.Operation{ - Name: opUntagResource, - HTTPMethod: "DELETE", - HTTPPath: "/2017-03-31/tags/{ARN}", - } - - if input == nil { - input = &UntagResourceInput{} - } - - output := &UntagResourceOutput{} - req := c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output.responseMetadata = aws.Response{Request: req} - - return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest} -} - -const opUpdateAlias = "UpdateAlias" - -// UpdateAliasRequest is a API request type for the UpdateAlias API operation. -type UpdateAliasRequest struct { - *aws.Request - Input *UpdateAliasInput - Copy func(*UpdateAliasInput) UpdateAliasRequest -} - -// Send marshals and sends the UpdateAlias API request. -func (r UpdateAliasRequest) Send() (*UpdateAliasOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateAliasOutput), nil -} - -// UpdateAliasRequest returns a request value for making API operation for -// AWS Lambda. -// -// Using this API you can update the function version to which the alias points -// and the alias description. For more information, see Introduction to AWS -// Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). -// -// This requires permission for the lambda:UpdateAlias action. -// -// // Example sending a request using the UpdateAliasRequest method. -// req := client.UpdateAliasRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateAlias -func (c *Lambda) UpdateAliasRequest(input *UpdateAliasInput) UpdateAliasRequest { - op := &aws.Operation{ - Name: opUpdateAlias, - HTTPMethod: "PUT", - HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", - } - - if input == nil { - input = &UpdateAliasInput{} - } - - output := &UpdateAliasOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return UpdateAliasRequest{Request: req, Input: input, Copy: c.UpdateAliasRequest} -} - -const opUpdateEventSourceMapping = "UpdateEventSourceMapping" - -// UpdateEventSourceMappingRequest is a API request type for the UpdateEventSourceMapping API operation. -type UpdateEventSourceMappingRequest struct { - *aws.Request - Input *UpdateEventSourceMappingInput - Copy func(*UpdateEventSourceMappingInput) UpdateEventSourceMappingRequest -} - -// Send marshals and sends the UpdateEventSourceMapping API request. -func (r UpdateEventSourceMappingRequest) Send() (*UpdateEventSourceMappingOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateEventSourceMappingOutput), nil -} - -// UpdateEventSourceMappingRequest returns a request value for making API operation for -// AWS Lambda. 
-// -// Updates an event source mapping. You can change the function that AWS Lambda -// invokes, or pause invocation and resume later from the same location. -// -// // Example sending a request using the UpdateEventSourceMappingRequest method. -// req := client.UpdateEventSourceMappingRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateEventSourceMapping -func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMappingInput) UpdateEventSourceMappingRequest { - op := &aws.Operation{ - Name: opUpdateEventSourceMapping, - HTTPMethod: "PUT", - HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", - } - - if input == nil { - input = &UpdateEventSourceMappingInput{} - } - - output := &UpdateEventSourceMappingOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return UpdateEventSourceMappingRequest{Request: req, Input: input, Copy: c.UpdateEventSourceMappingRequest} -} - -const opUpdateFunctionCode = "UpdateFunctionCode" - -// UpdateFunctionCodeRequest is a API request type for the UpdateFunctionCode API operation. -type UpdateFunctionCodeRequest struct { - *aws.Request - Input *UpdateFunctionCodeInput - Copy func(*UpdateFunctionCodeInput) UpdateFunctionCodeRequest -} - -// Send marshals and sends the UpdateFunctionCode API request. -func (r UpdateFunctionCodeRequest) Send() (*UpdateFunctionConfigurationOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateFunctionConfigurationOutput), nil -} - -// UpdateFunctionCodeRequest returns a request value for making API operation for -// AWS Lambda. -// -// Updates the code for the specified Lambda function. This operation must only -// be used on an existing Lambda function and cannot be used to update the function -// configuration. -// -// If you are using the versioning feature, note this API will always update -// the $LATEST version of your Lambda function. For information about the versioning -// feature, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// This operation requires permission for the lambda:UpdateFunctionCode action. -// -// // Example sending a request using the UpdateFunctionCodeRequest method. -// req := client.UpdateFunctionCodeRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateFunctionCode -func (c *Lambda) UpdateFunctionCodeRequest(input *UpdateFunctionCodeInput) UpdateFunctionCodeRequest { - op := &aws.Operation{ - Name: opUpdateFunctionCode, - HTTPMethod: "PUT", - HTTPPath: "/2015-03-31/functions/{FunctionName}/code", - } - - if input == nil { - input = &UpdateFunctionCodeInput{} - } - - output := &UpdateFunctionConfigurationOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return UpdateFunctionCodeRequest{Request: req, Input: input, Copy: c.UpdateFunctionCodeRequest} -} - -const opUpdateFunctionConfiguration = "UpdateFunctionConfiguration" - -// UpdateFunctionConfigurationRequest is a API request type for the UpdateFunctionConfiguration API operation. 
-type UpdateFunctionConfigurationRequest struct { - *aws.Request - Input *UpdateFunctionConfigurationInput - Copy func(*UpdateFunctionConfigurationInput) UpdateFunctionConfigurationRequest -} - -// Send marshals and sends the UpdateFunctionConfiguration API request. -func (r UpdateFunctionConfigurationRequest) Send() (*UpdateFunctionConfigurationOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*UpdateFunctionConfigurationOutput), nil -} - -// UpdateFunctionConfigurationRequest returns a request value for making API operation for -// AWS Lambda. -// -// Updates the configuration parameters for the specified Lambda function by -// using the values provided in the request. You provide only the parameters -// you want to change. This operation must only be used on an existing Lambda -// function and cannot be used to update the function's code. -// -// If you are using the versioning feature, note this API will always update -// the $LATEST version of your Lambda function. For information about the versioning -// feature, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). -// -// This operation requires permission for the lambda:UpdateFunctionConfiguration -// action. -// -// // Example sending a request using the UpdateFunctionConfigurationRequest method. -// req := client.UpdateFunctionConfigurationRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateFunctionConfiguration -func (c *Lambda) UpdateFunctionConfigurationRequest(input *UpdateFunctionConfigurationInput) UpdateFunctionConfigurationRequest { - op := &aws.Operation{ - Name: opUpdateFunctionConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/2015-03-31/functions/{FunctionName}/configuration", - } - - if input == nil { - input = &UpdateFunctionConfigurationInput{} - } - - output := &UpdateFunctionConfigurationOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return UpdateFunctionConfigurationRequest{Request: req, Input: input, Copy: c.UpdateFunctionConfigurationRequest} -} - -// Provides limits of code size and concurrency associated with the current -// account and region. For more information or to request a limit increase for -// concurrent executions, see Lambda Limits (http://docs.aws.amazon.com/lambda/latest/dg/limits.html). -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AccountLimit -type AccountLimit struct { - _ struct{} `type:"structure"` - - // Size, in bytes, of code/dependencies that you can zip into a deployment package - // (uncompressed zip/jar size) for uploading. The default limit is 250 MB. - CodeSizeUnzipped *int64 `type:"long"` - - // Size, in bytes, of a single zipped code/dependencies package you can upload - // for your Lambda function(.zip/.jar file). Try using Amazon S3 for uploading - // larger files. Default limit is 50 MB. - CodeSizeZipped *int64 `type:"long"` - - // Number of simultaneous executions of your function per region. The default - // limit is 1000. - ConcurrentExecutions *int64 `type:"integer"` - - // Maximum size, in bytes, of a code package you can upload per region. The - // default size is 75 GB. - TotalCodeSize *int64 `type:"long"` - - // The number of concurrent executions available to functions that do not have - // concurrency limits set. 
For more information, see Managing Concurrency (http://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html). - UnreservedConcurrentExecutions *int64 `type:"integer"` -} - -// String returns the string representation -func (s AccountLimit) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccountLimit) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s AccountLimit) MarshalFields(e protocol.FieldEncoder) error { - if s.CodeSizeUnzipped != nil { - v := *s.CodeSizeUnzipped - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSizeUnzipped", protocol.Int64Value(v), metadata) - } - if s.CodeSizeZipped != nil { - v := *s.CodeSizeZipped - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSizeZipped", protocol.Int64Value(v), metadata) - } - if s.ConcurrentExecutions != nil { - v := *s.ConcurrentExecutions - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "ConcurrentExecutions", protocol.Int64Value(v), metadata) - } - if s.TotalCodeSize != nil { - v := *s.TotalCodeSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "TotalCodeSize", protocol.Int64Value(v), metadata) - } - if s.UnreservedConcurrentExecutions != nil { - v := *s.UnreservedConcurrentExecutions - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "UnreservedConcurrentExecutions", protocol.Int64Value(v), metadata) - } - return nil -} - -// Provides code size usage and function count associated with the current account -// and region. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AccountUsage -type AccountUsage struct { - _ struct{} `type:"structure"` - - // The number of your account's existing functions per region. - FunctionCount *int64 `type:"long"` - - // Total size, in bytes, of the account's deployment packages per region. - TotalCodeSize *int64 `type:"long"` -} - -// String returns the string representation -func (s AccountUsage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccountUsage) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s AccountUsage) MarshalFields(e protocol.FieldEncoder) error { - if s.FunctionCount != nil { - v := *s.FunctionCount - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionCount", protocol.Int64Value(v), metadata) - } - if s.TotalCodeSize != nil { - v := *s.TotalCodeSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "TotalCodeSize", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AddLayerVersionPermissionRequest -type AddLayerVersionPermissionInput struct { - _ struct{} `type:"structure"` - - // The API action that grants access to the layer. For example, lambda:GetLayerVersion. - // - // Action is a required field - Action *string `type:"string" required:"true"` - - // The name of the layer. - // - // LayerName is a required field - LayerName *string `location:"uri" locationName:"LayerName" min:"1" type:"string" required:"true"` - - // With the principal set to *, grant permission to all accounts in the specified - // organization. 
- OrganizationId *string `type:"string"` - - // An account ID, or * to grant permission to all AWS accounts. - // - // Principal is a required field - Principal *string `type:"string" required:"true"` - - // Only update the policy if the revision ID matches the ID specified. Use this - // option to avoid modifying a policy that has changed since you last read it. - RevisionId *string `location:"querystring" locationName:"RevisionId" type:"string"` - - // An identifier that distinguishes the policy from others on the same layer - // version. - // - // StatementId is a required field - StatementId *string `min:"1" type:"string" required:"true"` - - // The version number. - // - // VersionNumber is a required field - VersionNumber *int64 `location:"uri" locationName:"VersionNumber" type:"long" required:"true"` -} - -// String returns the string representation -func (s AddLayerVersionPermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddLayerVersionPermissionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AddLayerVersionPermissionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "AddLayerVersionPermissionInput"} - - if s.Action == nil { - invalidParams.Add(aws.NewErrParamRequired("Action")) - } - - if s.LayerName == nil { - invalidParams.Add(aws.NewErrParamRequired("LayerName")) - } - if s.LayerName != nil && len(*s.LayerName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("LayerName", 1)) - } - - if s.Principal == nil { - invalidParams.Add(aws.NewErrParamRequired("Principal")) - } - - if s.StatementId == nil { - invalidParams.Add(aws.NewErrParamRequired("StatementId")) - } - if s.StatementId != nil && len(*s.StatementId) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("StatementId", 1)) - } - - if s.VersionNumber == nil { - invalidParams.Add(aws.NewErrParamRequired("VersionNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s AddLayerVersionPermissionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.Action != nil { - v := *s.Action - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Action", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.OrganizationId != nil { - v := *s.OrganizationId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "OrganizationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Principal != nil { - v := *s.Principal - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Principal", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.StatementId != nil { - v := *s.StatementId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "StatementId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.VersionNumber != nil { - v := *s.VersionNumber - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "VersionNumber", protocol.Int64Value(v), metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AddLayerVersionPermissionResponse -type AddLayerVersionPermissionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // A unique identifier for the current revision of the policy. - RevisionId *string `type:"string"` - - // The permission statement. - Statement *string `type:"string"` -} - -// String returns the string representation -func (s AddLayerVersionPermissionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddLayerVersionPermissionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s AddLayerVersionPermissionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s AddLayerVersionPermissionOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Statement != nil { - v := *s.Statement - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Statement", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AddPermissionRequest -type AddPermissionInput struct { - _ struct{} `type:"structure"` - - // The AWS Lambda action you want to allow in this statement. Each Lambda action - // is a string starting with lambda: followed by the API name . For example, - // lambda:CreateFunction. You can use wildcard (lambda:*) to grant permission - // for all AWS Lambda actions. 
- // - // Action is a required field - Action *string `type:"string" required:"true"` - - // A unique token that must be supplied by the principal invoking the function. - // This is currently only used for Alexa Smart Home functions. - EventSourceToken *string `type:"string"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // The principal who is getting this permission. The principal can be an AWS - // service (e.g. s3.amazonaws.com or sns.amazonaws.com) for service triggers, - // or an account ID for cross-account access. If you specify a service as a - // principal, use the SourceArn parameter to limit who can invoke the function - // through that service. - // - // Principal is a required field - Principal *string `type:"string" required:"true"` - - // Specify a version or alias to add permissions to a published version of the - // function. - Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` - - // An optional value you can use to ensure you are updating the latest update - // of the function version or alias. If the RevisionID you pass doesn't match - // the latest RevisionId of the function or alias, it will fail with an error - // message, advising you to retrieve the latest function version or alias RevisionID - // using either GetFunction or GetAlias - RevisionId *string `type:"string"` - - // This parameter is used for S3 and SES. The AWS account ID (without a hyphen) - // of the source owner. For example, if the SourceArn identifies a bucket, then - // this is the bucket owner's account ID. You can use this additional condition - // to ensure the bucket you specify is owned by a specific account (it is possible - // the bucket owner deleted the bucket and some other AWS account created the - // bucket). You can also use this condition to specify all sources (that is, - // you don't specify the SourceArn) owned by a specific account. - SourceAccount *string `type:"string"` - - // The Amazon Resource Name of the invoker. - // - // If you add a permission to a service principal without providing the source - // ARN, any AWS account that creates a mapping to your function ARN can invoke - // your Lambda function. - SourceArn *string `type:"string"` - - // A unique statement identifier. - // - // StatementId is a required field - StatementId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s AddPermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddPermissionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AddPermissionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "AddPermissionInput"} - - if s.Action == nil { - invalidParams.Add(aws.NewErrParamRequired("Action")) - } - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.Principal == nil { - invalidParams.Add(aws.NewErrParamRequired("Principal")) - } - if s.Qualifier != nil && len(*s.Qualifier) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Qualifier", 1)) - } - - if s.StatementId == nil { - invalidParams.Add(aws.NewErrParamRequired("StatementId")) - } - if s.StatementId != nil && len(*s.StatementId) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("StatementId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s AddPermissionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.Action != nil { - v := *s.Action - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Action", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.EventSourceToken != nil { - v := *s.EventSourceToken - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "EventSourceToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Principal != nil { - v := *s.Principal - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Principal", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.SourceAccount != nil { - v := *s.SourceAccount - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "SourceAccount", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.SourceArn != nil { - v := *s.SourceArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "SourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.StatementId != nil { - v := *s.StatementId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "StatementId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Qualifier != nil { - v := *s.Qualifier - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Qualifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AddPermissionResponse -type AddPermissionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The permission statement you specified in the request. The response returns - // the same as a string using a backslash ("\") as an escape character in the - // JSON. 
- Statement *string `type:"string"` -} - -// String returns the string representation -func (s AddPermissionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddPermissionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s AddPermissionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s AddPermissionOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.Statement != nil { - v := *s.Statement - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Statement", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// The alias's traffic shifting (http://docs.aws.amazon.com/lambda/latest/dg/lambda-traffic-shifting-using-aliases.html) -// configuration. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/AliasRoutingConfiguration -type AliasRoutingConfiguration struct { - _ struct{} `type:"structure"` - - // The name of the second alias, and the percentage of traffic that is routed - // to it. - AdditionalVersionWeights map[string]float64 `type:"map"` -} - -// String returns the string representation -func (s AliasRoutingConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AliasRoutingConfiguration) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s AliasRoutingConfiguration) MarshalFields(e protocol.FieldEncoder) error { - if len(s.AdditionalVersionWeights) > 0 { - v := s.AdditionalVersionWeights - - metadata := protocol.Metadata{} - ms0 := e.Map(protocol.BodyTarget, "AdditionalVersionWeights", metadata) - ms0.Start() - for k1, v1 := range v { - ms0.MapSetValue(k1, protocol.Float64Value(v1)) - } - ms0.End() - - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/CreateAliasRequest -type CreateAliasInput struct { - _ struct{} `type:"structure"` - - // Description of the alias. - Description *string `type:"string"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Lambda function version for which you are creating the alias. - // - // FunctionVersion is a required field - FunctionVersion *string `min:"1" type:"string" required:"true"` - - // Name for the alias you are creating. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Specifies an additional version your alias can point to, allowing you to - // dictate what percentage of traffic will invoke each version. For more information, - // see Traffic Shifting Using Aliases (http://docs.aws.amazon.com/lambda/latest/dg/lambda-traffic-shifting-using-aliases.html). 
- RoutingConfig *AliasRoutingConfiguration `type:"structure"` -} - -// String returns the string representation -func (s CreateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateAliasInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "CreateAliasInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.FunctionVersion == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionVersion")) - } - if s.FunctionVersion != nil && len(*s.FunctionVersion) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionVersion", 1)) - } - - if s.Name == nil { - invalidParams.Add(aws.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s CreateAliasInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionVersion != nil { - v := *s.FunctionVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Name != nil { - v := *s.Name - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RoutingConfig != nil { - v := s.RoutingConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "RoutingConfig", v, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/CreateEventSourceMappingRequest -type CreateEventSourceMappingInput struct { - _ struct{} `type:"structure"` - - // The maximum number of items to retrieve in a single batch. - // - // * Amazon Kinesis - Default 100. Max 10,000. - // - // * Amazon DynamoDB Streams - Default 100. Max 1,000. - // - // * Amazon Simple Queue Service - Default 10. Max 10. - BatchSize *int64 `min:"1" type:"integer"` - - // Disables the event source mapping to pause polling and invocation. - Enabled *bool `type:"boolean"` - - // The Amazon Resource Name (ARN) of the event source. - // - // * Amazon Kinesis - The ARN of the data stream or a stream consumer. - // - // * Amazon DynamoDB Streams - The ARN of the stream. - // - // * Amazon Simple Queue Service - The ARN of the queue. - // - // EventSourceArn is a required field - EventSourceArn *string `type:"string" required:"true"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. 
- // - // * Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it's limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `min:"1" type:"string" required:"true"` - - // The position in a stream from which to start reading. Required for Amazon - // Kinesis and Amazon DynamoDB Streams sources. AT_TIMESTAMP is only supported - // for Amazon Kinesis streams. - StartingPosition EventSourcePosition `type:"string" enum:"true"` - - // With StartingPosition set to AT_TIMESTAMP, the Unix time in seconds from - // which to start reading. - StartingPositionTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` -} - -// String returns the string representation -func (s CreateEventSourceMappingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateEventSourceMappingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateEventSourceMappingInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "CreateEventSourceMappingInput"} - if s.BatchSize != nil && *s.BatchSize < 1 { - invalidParams.Add(aws.NewErrParamMinValue("BatchSize", 1)) - } - - if s.EventSourceArn == nil { - invalidParams.Add(aws.NewErrParamRequired("EventSourceArn")) - } - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s CreateEventSourceMappingInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.BatchSize != nil { - v := *s.BatchSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "BatchSize", protocol.Int64Value(v), metadata) - } - if s.Enabled != nil { - v := *s.Enabled - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Enabled", protocol.BoolValue(v), metadata) - } - if s.EventSourceArn != nil { - v := *s.EventSourceArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "EventSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.StartingPosition) > 0 { - v := s.StartingPosition - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "StartingPosition", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if s.StartingPositionTimestamp != nil { - v := *s.StartingPositionTimestamp - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "StartingPositionTimestamp", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/CreateFunctionRequest -type CreateFunctionInput struct { - _ struct{} `type:"structure"` - - // The code for the function. 
- // - // Code is a required field - Code *FunctionCode `type:"structure" required:"true"` - - // A dead letter queue configuration that specifies the queue or topic where - // Lambda sends asynchronous events when they fail processing. For more information, - // see Dead Letter Queues (http://docs.aws.amazon.com/lambda/latest/dg/dlq.html). - DeadLetterConfig *DeadLetterConfig `type:"structure"` - - // A description of the function. - Description *string `type:"string"` - - // Environment variables that are accessible from function code during execution. - Environment *Environment `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `min:"1" type:"string" required:"true"` - - // The name of the method within your code that Lambda calls to execute your - // function. For more information, see Programming Model (http://docs.aws.amazon.com/lambda/latest/dg/programming-model-v2.html). - // - // Handler is a required field - Handler *string `type:"string" required:"true"` - - // The ARN of the KMS key used to encrypt your function's environment variables. - // If not provided, AWS Lambda will use a default service key. - KMSKeyArn *string `type:"string"` - - // A list of function layers (http://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html) - // to add to the function's execution environment. - Layers []string `type:"list"` - - // The amount of memory that your function has access to. Increasing the function's - // memory also increases it's CPU allocation. The default value is 128 MB. The - // value must be a multiple of 64 MB. - MemorySize *int64 `min:"128" type:"integer"` - - // Set to true to publish the first version of the function during creation. - Publish *bool `type:"boolean"` - - // The Amazon Resource Name (ARN) of the function's execution role (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role). - // - // Role is a required field - Role *string `type:"string" required:"true"` - - // The runtime version for the function. - // - // Runtime is a required field - Runtime Runtime `type:"string" required:"true" enum:"true"` - - // The list of tags (key-value pairs) assigned to the new function. For more - // information, see Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. - Tags map[string]string `type:"map"` - - // The amount of time that Lambda allows a function to run before terminating - // it. The default is 3 seconds. The maximum allowed value is 900 seconds. - Timeout *int64 `min:"1" type:"integer"` - - // Set Mode to Active to sample and trace a subset of incoming requests with - // AWS X-Ray. - TracingConfig *TracingConfig `type:"structure"` - - // If your Lambda function accesses resources in a VPC, you provide this parameter - // identifying the list of security group IDs and subnet IDs. These must belong - // to the same VPC. You must provide at least one security group and one subnet - // ID. 
- VpcConfig *VpcConfig `type:"structure"` -} - -// String returns the string representation -func (s CreateFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateFunctionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "CreateFunctionInput"} - - if s.Code == nil { - invalidParams.Add(aws.NewErrParamRequired("Code")) - } - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.Handler == nil { - invalidParams.Add(aws.NewErrParamRequired("Handler")) - } - if s.MemorySize != nil && *s.MemorySize < 128 { - invalidParams.Add(aws.NewErrParamMinValue("MemorySize", 128)) - } - - if s.Role == nil { - invalidParams.Add(aws.NewErrParamRequired("Role")) - } - if len(s.Runtime) == 0 { - invalidParams.Add(aws.NewErrParamRequired("Runtime")) - } - if s.Timeout != nil && *s.Timeout < 1 { - invalidParams.Add(aws.NewErrParamMinValue("Timeout", 1)) - } - if s.Code != nil { - if err := s.Code.Validate(); err != nil { - invalidParams.AddNested("Code", err.(aws.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s CreateFunctionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.Code != nil { - v := s.Code - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Code", v, metadata) - } - if s.DeadLetterConfig != nil { - v := s.DeadLetterConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "DeadLetterConfig", v, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Environment != nil { - v := s.Environment - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Environment", v, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Handler != nil { - v := *s.Handler - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Handler", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.KMSKeyArn != nil { - v := *s.KMSKeyArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "KMSKeyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.Layers) > 0 { - v := s.Layers - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "Layers", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if s.MemorySize != nil { - v := *s.MemorySize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "MemorySize", protocol.Int64Value(v), metadata) - } - if s.Publish != nil { - v := *s.Publish - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Publish", protocol.BoolValue(v), metadata) - } - if s.Role != nil 
{ - v := *s.Role - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.Runtime) > 0 { - v := s.Runtime - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Runtime", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if len(s.Tags) > 0 { - v := s.Tags - - metadata := protocol.Metadata{} - ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) - ms0.Start() - for k1, v1 := range v { - ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ms0.End() - - } - if s.Timeout != nil { - v := *s.Timeout - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Timeout", protocol.Int64Value(v), metadata) - } - if s.TracingConfig != nil { - v := s.TracingConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "TracingConfig", v, metadata) - } - if s.VpcConfig != nil { - v := s.VpcConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "VpcConfig", v, metadata) - } - return nil -} - -// The dead letter queue (http://docs.aws.amazon.com/lambda/latest/dg/dlq.html) -// for failed asynchronous invocations. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeadLetterConfig -type DeadLetterConfig struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic. - TargetArn *string `type:"string"` -} - -// String returns the string representation -func (s DeadLetterConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeadLetterConfig) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeadLetterConfig) MarshalFields(e protocol.FieldEncoder) error { - if s.TargetArn != nil { - v := *s.TargetArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "TargetArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteAliasRequest -type DeleteAliasInput struct { - _ struct{} `type:"structure"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Name of the alias to delete. - // - // Name is a required field - Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteAliasInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "DeleteAliasInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.Name == nil { - invalidParams.Add(aws.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteAliasInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Name != nil { - v := *s.Name - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteAliasOutput -type DeleteAliasOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s DeleteAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteAliasOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s DeleteAliasOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteAliasOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteEventSourceMappingRequest -type DeleteEventSourceMappingInput struct { - _ struct{} `type:"structure"` - - // The identifier of the event source mapping. - // - // UUID is a required field - UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteEventSourceMappingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteEventSourceMappingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteEventSourceMappingInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "DeleteEventSourceMappingInput"} - - if s.UUID == nil { - invalidParams.Add(aws.NewErrParamRequired("UUID")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s DeleteEventSourceMappingInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.UUID != nil { - v := *s.UUID - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "UUID", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteFunctionConcurrencyRequest -type DeleteFunctionConcurrencyInput struct { - _ struct{} `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteFunctionConcurrencyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteFunctionConcurrencyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteFunctionConcurrencyInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "DeleteFunctionConcurrencyInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteFunctionConcurrencyInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteFunctionConcurrencyOutput -type DeleteFunctionConcurrencyOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s DeleteFunctionConcurrencyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteFunctionConcurrencyOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s DeleteFunctionConcurrencyOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteFunctionConcurrencyOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteFunctionRequest -type DeleteFunctionInput struct { - _ struct{} `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. 
- // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Specify a version to delete. You cannot delete a version that is referenced - // by an alias. - Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` -} - -// String returns the string representation -func (s DeleteFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteFunctionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "DeleteFunctionInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.Qualifier != nil && len(*s.Qualifier) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Qualifier", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteFunctionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Qualifier != nil { - v := *s.Qualifier - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Qualifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteFunctionOutput -type DeleteFunctionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s DeleteFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteFunctionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s DeleteFunctionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteFunctionOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteLayerVersionRequest -type DeleteLayerVersionInput struct { - _ struct{} `type:"structure"` - - // The name of the layer. - // - // LayerName is a required field - LayerName *string `location:"uri" locationName:"LayerName" min:"1" type:"string" required:"true"` - - // The version number. 
- // - // VersionNumber is a required field - VersionNumber *int64 `location:"uri" locationName:"VersionNumber" type:"long" required:"true"` -} - -// String returns the string representation -func (s DeleteLayerVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteLayerVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteLayerVersionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "DeleteLayerVersionInput"} - - if s.LayerName == nil { - invalidParams.Add(aws.NewErrParamRequired("LayerName")) - } - if s.LayerName != nil && len(*s.LayerName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("LayerName", 1)) - } - - if s.VersionNumber == nil { - invalidParams.Add(aws.NewErrParamRequired("VersionNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteLayerVersionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.VersionNumber != nil { - v := *s.VersionNumber - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "VersionNumber", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteLayerVersionOutput -type DeleteLayerVersionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s DeleteLayerVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteLayerVersionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s DeleteLayerVersionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s DeleteLayerVersionOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// A function's environment variable settings. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/Environment -type Environment struct { - _ struct{} `type:"structure"` - - // Environment variable key-value pairs. - Variables map[string]string `type:"map"` -} - -// String returns the string representation -func (s Environment) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Environment) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s Environment) MarshalFields(e protocol.FieldEncoder) error { - if len(s.Variables) > 0 { - v := s.Variables - - metadata := protocol.Metadata{} - ms0 := e.Map(protocol.BodyTarget, "Variables", metadata) - ms0.Start() - for k1, v1 := range v { - ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ms0.End() - - } - return nil -} - -// Error messages for environment variables that could not be applied. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/EnvironmentError -type EnvironmentError struct { - _ struct{} `type:"structure"` - - // The error code. - ErrorCode *string `type:"string"` - - // The error message. - Message *string `type:"string"` -} - -// String returns the string representation -func (s EnvironmentError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EnvironmentError) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s EnvironmentError) MarshalFields(e protocol.FieldEncoder) error { - if s.ErrorCode != nil { - v := *s.ErrorCode - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "ErrorCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Message != nil { - v := *s.Message - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// The results of a configuration update that applied environment variables. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/EnvironmentResponse -type EnvironmentResponse struct { - _ struct{} `type:"structure"` - - // Error messages for environment variables that could not be applied. - Error *EnvironmentError `type:"structure"` - - // Environment variable key-value pairs. - Variables map[string]string `type:"map"` -} - -// String returns the string representation -func (s EnvironmentResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EnvironmentResponse) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s EnvironmentResponse) MarshalFields(e protocol.FieldEncoder) error { - if s.Error != nil { - v := s.Error - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Error", v, metadata) - } - if len(s.Variables) > 0 { - v := s.Variables - - metadata := protocol.Metadata{} - ms0 := e.Map(protocol.BodyTarget, "Variables", metadata) - ms0.Start() - for k1, v1 := range v { - ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ms0.End() - - } - return nil -} - -// The code for the Lambda function. You can specify either an S3 location, -// or upload a deployment package directly. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/FunctionCode -type FunctionCode struct { - _ struct{} `type:"structure"` - - // An Amazon S3 bucket in the same region as your function. - S3Bucket *string `min:"3" type:"string"` - - // The Amazon S3 key of the deployment package. - S3Key *string `min:"1" type:"string"` - - // For versioned objects, the version of the deployment package object to use. - S3ObjectVersion *string `min:"1" type:"string"` - - // The base64-encoded contents of your zip file containing your deployment package. - // AWS SDK and AWS CLI clients handle the encoding for you. - // - // ZipFile is automatically base64 encoded/decoded by the SDK. 
- ZipFile []byte `type:"blob"` -} - -// String returns the string representation -func (s FunctionCode) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FunctionCode) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *FunctionCode) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "FunctionCode"} - if s.S3Bucket != nil && len(*s.S3Bucket) < 3 { - invalidParams.Add(aws.NewErrParamMinLen("S3Bucket", 3)) - } - if s.S3Key != nil && len(*s.S3Key) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("S3Key", 1)) - } - if s.S3ObjectVersion != nil && len(*s.S3ObjectVersion) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("S3ObjectVersion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s FunctionCode) MarshalFields(e protocol.FieldEncoder) error { - if s.S3Bucket != nil { - v := *s.S3Bucket - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3Bucket", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.S3Key != nil { - v := *s.S3Key - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.S3ObjectVersion != nil { - v := *s.S3ObjectVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3ObjectVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.ZipFile != nil { - v := s.ZipFile - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "ZipFile", protocol.QuotedValue{ValueMarshaler: protocol.BytesValue(v)}, metadata) - } - return nil -} - -// The object for the Lambda function location. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/FunctionCodeLocation -type FunctionCodeLocation struct { - _ struct{} `type:"structure"` - - // The presigned URL you can use to download the function's .zip file that you - // previously uploaded. The URL is valid for up to 10 minutes. - Location *string `type:"string"` - - // The repository from which you can download the function. - RepositoryType *string `type:"string"` -} - -// String returns the string representation -func (s FunctionCodeLocation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FunctionCodeLocation) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
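FunctionCode, shown just above, accepts the deployment package either inline via ZipFile (the field comment notes the SDK base64-encodes and decodes it for you) or as an S3 object reference. A hedged sketch of building it both ways; the import path and file names are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/service/lambda" // assumed vendored path
)

// codeFromZip embeds a local deployment package; the SDK base64-encodes
// ZipFile during marshaling, per the field comment above.
func codeFromZip(path string) (*lambda.FunctionCode, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return &lambda.FunctionCode{ZipFile: data}, nil
}

// codeFromS3 points at an already-uploaded package instead of embedding it.
func codeFromS3(bucket, key string) *lambda.FunctionCode {
	return &lambda.FunctionCode{S3Bucket: &bucket, S3Key: &key}
}

func main() {
	// "function.zip" is a placeholder file name.
	code, err := codeFromZip("function.zip")
	if err != nil {
		log.Fatal(err)
	}
	if err := code.Validate(); err != nil { // enforces the documented S3Bucket/S3Key minimum lengths
		log.Fatal(err)
	}
	_ = codeFromS3("my-deploy-bucket", "builds/function.zip")
}
```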
-func (s FunctionCodeLocation) MarshalFields(e protocol.FieldEncoder) error { - if s.Location != nil { - v := *s.Location - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Location", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RepositoryType != nil { - v := *s.RepositoryType - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RepositoryType", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetAccountSettingsRequest -type GetAccountSettingsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetAccountSettingsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccountSettingsInput) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetAccountSettingsInput) MarshalFields(e protocol.FieldEncoder) error { - - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetAccountSettingsResponse -type GetAccountSettingsOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // Limits related to concurrency and code storage. - AccountLimit *AccountLimit `type:"structure"` - - // The number of functions and amount of storage in use. - AccountUsage *AccountUsage `type:"structure"` -} - -// String returns the string representation -func (s GetAccountSettingsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccountSettingsOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetAccountSettingsOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetAccountSettingsOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.AccountLimit != nil { - v := s.AccountLimit - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "AccountLimit", v, metadata) - } - if s.AccountUsage != nil { - v := s.AccountUsage - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "AccountUsage", v, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetAliasRequest -type GetAliasInput struct { - _ struct{} `type:"structure"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Name of the alias for which you want to retrieve information. 
- // - // Name is a required field - Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAliasInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetAliasInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.Name == nil { - invalidParams.Add(aws.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetAliasInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Name != nil { - v := *s.Name - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetEventSourceMappingRequest -type GetEventSourceMappingInput struct { - _ struct{} `type:"structure"` - - // The identifier of the event source mapping. - // - // UUID is a required field - UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetEventSourceMappingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetEventSourceMappingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetEventSourceMappingInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetEventSourceMappingInput"} - - if s.UUID == nil { - invalidParams.Add(aws.NewErrParamRequired("UUID")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetEventSourceMappingInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.UUID != nil { - v := *s.UUID - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "UUID", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetFunctionConfigurationRequest -type GetFunctionConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. 
If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Specify a version or alias to get details about a published version of the - // function. - Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetFunctionConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFunctionConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFunctionConfigurationInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetFunctionConfigurationInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.Qualifier != nil && len(*s.Qualifier) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Qualifier", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetFunctionConfigurationInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Qualifier != nil { - v := *s.Qualifier - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Qualifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetFunctionRequest -type GetFunctionInput struct { - _ struct{} `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Specify a version or alias to get details about a published version of the - // function. - Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetFunctionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetFunctionInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.Qualifier != nil && len(*s.Qualifier) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Qualifier", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetFunctionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Qualifier != nil { - v := *s.Qualifier - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Qualifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// This response contains the object for the Lambda function location (see FunctionCodeLocation. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetFunctionResponse -type GetFunctionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The function's code. - Code *FunctionCodeLocation `type:"structure"` - - // The concurrent execution limit set for this function. For more information, - // see Managing Concurrency (http://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html). - Concurrency *PutFunctionConcurrencyOutput `type:"structure"` - - // The function's configuration. - Configuration *UpdateFunctionConfigurationOutput `type:"structure"` - - // Returns the list of tags associated with the function. For more information, - // see Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. - Tags map[string]string `type:"map"` -} - -// String returns the string representation -func (s GetFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFunctionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetFunctionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
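GetFunctionOutput above returns the function's configuration, its tags, and a FunctionCodeLocation whose Location field is a presigned download URL that the earlier comment says stays valid for about 10 minutes. A sketch of consuming such a response; the GetFunction call itself is omitted and the sample values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/lambda" // assumed vendored path
)

// describeFunction prints the fields of a GetFunction response that are
// defined in the shapes above: the presigned code location and the tags.
func describeFunction(out *lambda.GetFunctionOutput) {
	if out.Code != nil && out.Code.Location != nil {
		// Presigned URL for the deployment package; per the FunctionCodeLocation
		// comment it expires after roughly 10 minutes, so download promptly.
		fmt.Println("code archive:", *out.Code.Location)
	}
	if out.Code != nil && out.Code.RepositoryType != nil {
		fmt.Println("repository type:", *out.Code.RepositoryType)
	}
	for k, v := range out.Tags {
		fmt.Printf("tag %s=%s\n", k, v)
	}
}

func main() {
	// Placeholder values standing in for a real GetFunction response.
	loc, repo := "https://example.com/presigned-package-url", "S3"
	describeFunction(&lambda.GetFunctionOutput{
		Code: &lambda.FunctionCodeLocation{Location: &loc, RepositoryType: &repo},
		Tags: map[string]string{"team": "platform"},
	})
}
```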
-func (s GetFunctionOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.Code != nil { - v := s.Code - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Code", v, metadata) - } - if s.Concurrency != nil { - v := s.Concurrency - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Concurrency", v, metadata) - } - if s.Configuration != nil { - v := s.Configuration - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Configuration", v, metadata) - } - if len(s.Tags) > 0 { - v := s.Tags - - metadata := protocol.Metadata{} - ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) - ms0.Start() - for k1, v1 := range v { - ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ms0.End() - - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetLayerVersionRequest -type GetLayerVersionInput struct { - _ struct{} `type:"structure"` - - // The name of the layer. - // - // LayerName is a required field - LayerName *string `location:"uri" locationName:"LayerName" min:"1" type:"string" required:"true"` - - // The version number. - // - // VersionNumber is a required field - VersionNumber *int64 `location:"uri" locationName:"VersionNumber" type:"long" required:"true"` -} - -// String returns the string representation -func (s GetLayerVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetLayerVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetLayerVersionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetLayerVersionInput"} - - if s.LayerName == nil { - invalidParams.Add(aws.NewErrParamRequired("LayerName")) - } - if s.LayerName != nil && len(*s.LayerName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("LayerName", 1)) - } - - if s.VersionNumber == nil { - invalidParams.Add(aws.NewErrParamRequired("VersionNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetLayerVersionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.VersionNumber != nil { - v := *s.VersionNumber - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "VersionNumber", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetLayerVersionResponse -type GetLayerVersionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The layer's compatible runtimes. - CompatibleRuntimes []Runtime `type:"list"` - - // Details about the layer version. - Content *LayerVersionContentOutput `type:"structure"` - - // The date that the layer version was created, in ISO-8601 format (https://www.w3.org/TR/NOTE-datetime) - // (YYYY-MM-DDThh:mm:ss.sTZD). - CreatedDate *string `type:"string"` - - // The description of the version. - Description *string `type:"string"` - - // The Amazon Resource Name (ARN) of the function layer. - LayerArn *string `min:"1" type:"string"` - - // The ARN of the layer version. 
- LayerVersionArn *string `min:"1" type:"string"` - - // The layer's software license. - LicenseInfo *string `type:"string"` - - // The version number. - Version *int64 `type:"long"` -} - -// String returns the string representation -func (s GetLayerVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetLayerVersionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetLayerVersionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetLayerVersionOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.CompatibleRuntimes) > 0 { - v := s.CompatibleRuntimes - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "CompatibleRuntimes", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if s.Content != nil { - v := s.Content - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Content", v, metadata) - } - if s.CreatedDate != nil { - v := *s.CreatedDate - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CreatedDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerArn != nil { - v := *s.LayerArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LayerArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerVersionArn != nil { - v := *s.LayerVersionArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LayerVersionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LicenseInfo != nil { - v := *s.LicenseInfo - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LicenseInfo", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Version != nil { - v := *s.Version - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Version", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetLayerVersionPolicyRequest -type GetLayerVersionPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the layer. - // - // LayerName is a required field - LayerName *string `location:"uri" locationName:"LayerName" min:"1" type:"string" required:"true"` - - // The version number. - // - // VersionNumber is a required field - VersionNumber *int64 `location:"uri" locationName:"VersionNumber" type:"long" required:"true"` -} - -// String returns the string representation -func (s GetLayerVersionPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetLayerVersionPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetLayerVersionPolicyInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetLayerVersionPolicyInput"} - - if s.LayerName == nil { - invalidParams.Add(aws.NewErrParamRequired("LayerName")) - } - if s.LayerName != nil && len(*s.LayerName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("LayerName", 1)) - } - - if s.VersionNumber == nil { - invalidParams.Add(aws.NewErrParamRequired("VersionNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetLayerVersionPolicyInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.VersionNumber != nil { - v := *s.VersionNumber - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "VersionNumber", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetLayerVersionPolicyResponse -type GetLayerVersionPolicyOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The policy document. - Policy *string `type:"string"` - - // A unique identifier for the current revision of the policy. - RevisionId *string `type:"string"` -} - -// String returns the string representation -func (s GetLayerVersionPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetLayerVersionPolicyOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetLayerVersionPolicyOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetLayerVersionPolicyOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.Policy != nil { - v := *s.Policy - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Policy", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetPolicyRequest -type GetPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // You can specify this optional query parameter to specify a function version - // or an alias name in which case this API will return all permissions associated - // with the specific qualified ARN. If you don't provide this parameter, the - // API will return permissions that apply to the unqualified function ARN. 
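GetLayerVersionPolicyOutput above, and GetPolicyOutput just below, both hand back the resource policy as an escaped JSON string rather than a structured type, so callers decode it themselves. A small standard-library sketch with a hand-written sample document:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// printStatements decodes the Policy string returned by GetPolicy or
// GetLayerVersionPolicy into a minimal struct and lists the statement IDs.
func printStatements(policy string) error {
	var doc struct {
		Statement []struct {
			Sid    string `json:"Sid"`
			Action string `json:"Action"`
		} `json:"Statement"`
	}
	if err := json.Unmarshal([]byte(policy), &doc); err != nil {
		return err
	}
	for _, st := range doc.Statement {
		fmt.Printf("statement %s allows %s\n", st.Sid, st.Action)
	}
	return nil
}

func main() {
	// A tiny, hand-written example document standing in for the Policy field.
	const policy = `{"Version":"2012-10-17","Statement":[{"Sid":"sns-invoke","Action":"lambda:InvokeFunction"}]}`
	if err := printStatements(policy); err != nil {
		log.Fatal(err)
	}
}
```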
- Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPolicyInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetPolicyInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.Qualifier != nil && len(*s.Qualifier) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Qualifier", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetPolicyInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Qualifier != nil { - v := *s.Qualifier - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Qualifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetPolicyResponse -type GetPolicyOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The resource policy associated with the specified function. The response - // returns the same as a string using a backslash ("\") as an escape character - // in the JSON. - Policy *string `type:"string"` - - // Represents the latest updated revision of the function or alias. - RevisionId *string `type:"string"` -} - -// String returns the string representation -func (s GetPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPolicyOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetPolicyOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s GetPolicyOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.Policy != nil { - v := *s.Policy - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Policy", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/InvokeAsyncRequest -type InvokeAsyncInput struct { - _ struct{} `deprecated:"true" type:"structure" payload:"InvokeArgs"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. 
If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // JSON that you want to provide to your Lambda function as input. - // - // InvokeArgs is a required field - InvokeArgs io.ReadSeeker `type:"blob" required:"true"` -} - -// String returns the string representation -func (s InvokeAsyncInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InvokeAsyncInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InvokeAsyncInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "InvokeAsyncInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.InvokeArgs == nil { - invalidParams.Add(aws.NewErrParamRequired("InvokeArgs")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s InvokeAsyncInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.InvokeArgs != nil { - v := s.InvokeArgs - - metadata := protocol.Metadata{} - e.SetStream(protocol.PayloadTarget, "InvokeArgs", protocol.ReadSeekerStream{V: v}, metadata) - } - return nil -} - -// Upon success, it returns empty response. Otherwise, throws an exception. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/InvokeAsyncResponse -type InvokeAsyncOutput struct { - _ struct{} `deprecated:"true" type:"structure"` - - responseMetadata aws.Response - - // It will be 202 upon success. - Status *int64 `location:"statusCode" type:"integer"` -} - -// String returns the string representation -func (s InvokeAsyncOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InvokeAsyncOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s InvokeAsyncOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s InvokeAsyncOutput) MarshalFields(e protocol.FieldEncoder) error { - // ignoring invalid encode state, StatusCode. Status - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/InvocationRequest -type InvokeInput struct { - _ struct{} `type:"structure" payload:"Payload"` - - // Using the ClientContext you can pass client-specific information to the Lambda - // function you are invoking. You can then process the client information in - // your Lambda function as you choose through the context variable. For an example - // of a ClientContext JSON, see PutEvents (http://docs.aws.amazon.com/mobileanalytics/latest/ug/PutEvents.html) - // in the Amazon Mobile Analytics API Reference and User Guide. - // - // The ClientContext JSON must be base64-encoded and has a maximum size of 3583 - // bytes. 
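InvokeAsyncInput above is marked deprecated and takes its payload as an io.ReadSeeker that MarshalFields streams as the request body; an in-memory *bytes.Reader satisfies that interface. A brief sketch, with the import path assumed as before:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/lambda" // assumed vendored path
)

func main() {
	// InvokeArgs must satisfy io.ReadSeeker; *bytes.Reader does, so an
	// in-memory JSON payload can be streamed directly.
	name := "my-function"
	in := lambda.InvokeAsyncInput{
		FunctionName: &name,
		InvokeArgs:   bytes.NewReader([]byte(`{"source":"scheduled-job"}`)),
	}
	if err := in.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	// The shape is deprecated; Invoke with InvocationType "Event" (described
	// below) covers the same asynchronous use case.
	fmt.Println("InvokeAsync input ready")
}
```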
- // - // ClientContext information is returned only if you use the synchronous (RequestResponse) - // invocation type. - ClientContext *string `location:"header" locationName:"X-Amz-Client-Context" type:"string"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Choose from the following options. - // - // * RequestResponse (default) - Invoke the function synchronously. Keep - // the connection open until the function returns a response or times out. - // - // * Event - Invoke the function asynchronously. Send events that fail multiple - // times to the function's dead-letter queue (if configured). - // - // * DryRun - Validate parameter values and verify that the user or role - // has permission to invoke the function. - InvocationType InvocationType `location:"header" locationName:"X-Amz-Invocation-Type" type:"string" enum:"true"` - - // You can set this optional parameter to Tail in the request only if you specify - // the InvocationType parameter with value RequestResponse. In this case, AWS - // Lambda returns the base64-encoded last 4 KB of log data produced by your - // Lambda function in the x-amz-log-result header. - LogType LogType `location:"header" locationName:"X-Amz-Log-Type" type:"string" enum:"true"` - - // JSON that you want to provide to your Lambda function as input. - Payload []byte `type:"blob"` - - // Specify a version or alias to invoke a published version of the function. - Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` -} - -// String returns the string representation -func (s InvokeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InvokeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InvokeInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "InvokeInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.Qualifier != nil && len(*s.Qualifier) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Qualifier", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
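The InvokeInput fields above map almost directly onto the wire: ClientContext must be base64-encoded JSON of at most 3583 bytes, InvocationType and LogType travel as X-Amz-* headers, and Payload becomes the request body. A hedged sketch that assembles such an input; plain string conversions are used for the two enum fields to keep the example self-contained, though the generated package presumably also defines named constants for them:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/lambda" // assumed vendored path
)

func main() {
	// Event payload that the function will receive as its input.
	payload, err := json.Marshal(map[string]string{"orderId": "42"})
	if err != nil {
		log.Fatal(err)
	}

	// ClientContext is caller-defined JSON, base64-encoded as the field
	// comment requires (the documented maximum is 3583 bytes).
	clientContext := base64.StdEncoding.EncodeToString([]byte(`{"client":{"app_title":"mserv-demo"}}`))

	name, qualifier := "my-function", "PROD"
	in := lambda.InvokeInput{
		FunctionName:   &name,
		Qualifier:      &qualifier,
		Payload:        payload,
		ClientContext:  &clientContext,
		InvocationType: lambda.InvocationType("RequestResponse"), // "Event" and "DryRun" are the other documented values
		LogType:        lambda.LogType("Tail"),                   // only honored for RequestResponse invocations
	}
	if err := in.Validate(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("invoke input ready")
}
```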
-func (s InvokeInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.ClientContext != nil { - v := *s.ClientContext - - metadata := protocol.Metadata{} - e.SetValue(protocol.HeaderTarget, "X-Amz-Client-Context", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.InvocationType) > 0 { - v := s.InvocationType - - metadata := protocol.Metadata{} - e.SetValue(protocol.HeaderTarget, "X-Amz-Invocation-Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if len(s.LogType) > 0 { - v := s.LogType - - metadata := protocol.Metadata{} - e.SetValue(protocol.HeaderTarget, "X-Amz-Log-Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Payload != nil { - v := s.Payload - - metadata := protocol.Metadata{} - e.SetStream(protocol.PayloadTarget, "Payload", protocol.BytesStream(v), metadata) - } - if s.Qualifier != nil { - v := *s.Qualifier - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Qualifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Upon success, returns an empty response. Otherwise, throws an exception. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/InvocationResponse -type InvokeOutput struct { - _ struct{} `type:"structure" payload:"Payload"` - - responseMetadata aws.Response - - // The function version that has been executed. This value is returned only - // if the invocation type is RequestResponse. For more information, see Traffic - // Shifting Using Aliases (http://docs.aws.amazon.com/lambda/latest/dg/lambda-traffic-shifting-using-aliases.html). - ExecutedVersion *string `location:"header" locationName:"X-Amz-Executed-Version" min:"1" type:"string"` - - // Indicates whether an error occurred while executing the Lambda function. - // If an error occurred this field will have one of two values; Handled or Unhandled. - // Handled errors are errors that are reported by the function while the Unhandled - // errors are those detected and reported by AWS Lambda. Unhandled errors include - // out of memory errors and function timeouts. For information about how to - // report an Handled error, see Programming Model (http://docs.aws.amazon.com/lambda/latest/dg/programming-model.html). - FunctionError *string `location:"header" locationName:"X-Amz-Function-Error" type:"string"` - - // It is the base64-encoded logs for the Lambda function invocation. This is - // present only if the invocation type is RequestResponse and the logs were - // requested. - LogResult *string `location:"header" locationName:"X-Amz-Log-Result" type:"string"` - - // It is the JSON representation of the object returned by the Lambda function. - // This is present only if the invocation type is RequestResponse. - // - // In the event of a function error this field contains a message describing - // the error. For the Handled errors the Lambda function will report this message. - // For Unhandled errors AWS Lambda reports the message. - Payload []byte `type:"blob"` - - // The HTTP status code will be in the 200 range for successful request. For - // the RequestResponse invocation type this status code will be 200. For the - // Event invocation type this status code will be 202. 
For the DryRun invocation - // type the status code will be 204. - StatusCode *int64 `location:"statusCode" type:"integer"` -} - -// String returns the string representation -func (s InvokeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InvokeOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s InvokeOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s InvokeOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.ExecutedVersion != nil { - v := *s.ExecutedVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.HeaderTarget, "X-Amz-Executed-Version", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionError != nil { - v := *s.FunctionError - - metadata := protocol.Metadata{} - e.SetValue(protocol.HeaderTarget, "X-Amz-Function-Error", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LogResult != nil { - v := *s.LogResult - - metadata := protocol.Metadata{} - e.SetValue(protocol.HeaderTarget, "X-Amz-Log-Result", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Payload != nil { - v := s.Payload - - metadata := protocol.Metadata{} - e.SetStream(protocol.PayloadTarget, "Payload", protocol.BytesStream(v), metadata) - } - // ignoring invalid encode state, StatusCode. StatusCode - return nil -} - -// A function layer. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/Layer -type Layer struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the function layer. - Arn *string `min:"1" type:"string"` - - // The size of the layer archive in bytes. - CodeSize *int64 `type:"long"` -} - -// String returns the string representation -func (s Layer) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Layer) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s Layer) MarshalFields(e protocol.FieldEncoder) error { - if s.Arn != nil { - v := *s.Arn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.CodeSize != nil { - v := *s.CodeSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSize", protocol.Int64Value(v), metadata) - } - return nil -} - -// A ZIP archive that contains the contents of the function layer. You can specify -// either an Amazon S3 location, or upload a layer archive directly. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/LayerVersionContentInput -type LayerVersionContentInput struct { - _ struct{} `type:"structure"` - - // The Amazon S3 bucket of the layer archive. - S3Bucket *string `min:"3" type:"string"` - - // The Amazon S3 key of the layer archive. - S3Key *string `min:"1" type:"string"` - - // For versioned objects, the version of the layer archive object to use. - S3ObjectVersion *string `min:"1" type:"string"` - - // The base64-encoded contents of the layer archive. AWS SDK and AWS CLI clients - // handle the encoding for you. - // - // ZipFile is automatically base64 encoded/decoded by the SDK. 
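On the response side, InvokeOutput above reports failures out of band: FunctionError distinguishes Handled from Unhandled errors, LogResult holds the base64-encoded last 4 KB of logs when LogType was Tail, and the function's return value arrives as the raw Payload. A sketch of unpacking those fields from an already-received response:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/lambda" // assumed vendored path
)

// report unpacks a RequestResponse invocation result using only the
// InvokeOutput fields documented above.
func report(out *lambda.InvokeOutput) {
	if out.FunctionError != nil {
		// "Handled" means the function reported the error itself; "Unhandled"
		// covers timeouts, out-of-memory conditions, and similar failures.
		fmt.Println("function error:", *out.FunctionError)
	}
	if out.ExecutedVersion != nil {
		fmt.Println("executed version:", *out.ExecutedVersion)
	}
	if out.LogResult != nil {
		if logs, err := base64.StdEncoding.DecodeString(*out.LogResult); err == nil {
			fmt.Printf("last 4 KB of logs:\n%s\n", logs)
		}
	}
	fmt.Println("payload:", string(out.Payload))
}

func main() {
	// Hand-built sample standing in for a real response.
	version := "1"
	report(&lambda.InvokeOutput{
		ExecutedVersion: &version,
		Payload:         []byte(`{"status":"ok"}`),
	})
}
```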
- ZipFile []byte `type:"blob"` -} - -// String returns the string representation -func (s LayerVersionContentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LayerVersionContentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LayerVersionContentInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "LayerVersionContentInput"} - if s.S3Bucket != nil && len(*s.S3Bucket) < 3 { - invalidParams.Add(aws.NewErrParamMinLen("S3Bucket", 3)) - } - if s.S3Key != nil && len(*s.S3Key) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("S3Key", 1)) - } - if s.S3ObjectVersion != nil && len(*s.S3ObjectVersion) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("S3ObjectVersion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s LayerVersionContentInput) MarshalFields(e protocol.FieldEncoder) error { - if s.S3Bucket != nil { - v := *s.S3Bucket - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3Bucket", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.S3Key != nil { - v := *s.S3Key - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.S3ObjectVersion != nil { - v := *s.S3ObjectVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3ObjectVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.ZipFile != nil { - v := s.ZipFile - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "ZipFile", protocol.QuotedValue{ValueMarshaler: protocol.BytesValue(v)}, metadata) - } - return nil -} - -// Details about a layer version. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/LayerVersionContentOutput -type LayerVersionContentOutput struct { - _ struct{} `type:"structure"` - - // The SHA-256 hash of the layer archive. - CodeSha256 *string `type:"string"` - - // The size of the layer archive in bytes. - CodeSize *int64 `type:"long"` - - // A link to the layer archive in Amazon S3 that is valid for 10 minutes. - Location *string `type:"string"` -} - -// String returns the string representation -func (s LayerVersionContentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LayerVersionContentOutput) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s LayerVersionContentOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.CodeSha256 != nil { - v := *s.CodeSha256 - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSha256", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.CodeSize != nil { - v := *s.CodeSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSize", protocol.Int64Value(v), metadata) - } - if s.Location != nil { - v := *s.Location - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Location", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Details about a layer version. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/LayerVersionsListItem -type LayerVersionsListItem struct { - _ struct{} `type:"structure"` - - // The layer's compatible runtimes. - CompatibleRuntimes []Runtime `type:"list"` - - // The date that the version was created, in ISO 8601 format. For example, 2018-11-27T15:10:45.123+0000. - CreatedDate *string `type:"string"` - - // The description of the version. - Description *string `type:"string"` - - // The ARN of the layer version. - LayerVersionArn *string `min:"1" type:"string"` - - // The layer's open-source license. - LicenseInfo *string `type:"string"` - - // The version number. - Version *int64 `type:"long"` -} - -// String returns the string representation -func (s LayerVersionsListItem) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LayerVersionsListItem) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s LayerVersionsListItem) MarshalFields(e protocol.FieldEncoder) error { - if len(s.CompatibleRuntimes) > 0 { - v := s.CompatibleRuntimes - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "CompatibleRuntimes", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if s.CreatedDate != nil { - v := *s.CreatedDate - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CreatedDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerVersionArn != nil { - v := *s.LayerVersionArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LayerVersionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LicenseInfo != nil { - v := *s.LicenseInfo - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LicenseInfo", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Version != nil { - v := *s.Version - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Version", protocol.Int64Value(v), metadata) - } - return nil -} - -// Details about a function layer. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/LayersListItem -type LayersListItem struct { - _ struct{} `type:"structure"` - - // The newest version of the layer. - LatestMatchingVersion *LayerVersionsListItem `type:"structure"` - - // The Amazon Resource Name (ARN) of the function layer. - LayerArn *string `min:"1" type:"string"` - - // The name of the layer. - LayerName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s LayersListItem) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LayersListItem) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s LayersListItem) MarshalFields(e protocol.FieldEncoder) error { - if s.LatestMatchingVersion != nil { - v := s.LatestMatchingVersion - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "LatestMatchingVersion", v, metadata) - } - if s.LayerArn != nil { - v := *s.LayerArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LayerArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListAliasesRequest -type ListAliasesInput struct { - _ struct{} `type:"structure"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // If you specify this optional parameter, the API returns only the aliases - // that are pointing to the specific Lambda function version, otherwise the - // API returns all of the aliases created for the Lambda function. - FunctionVersion *string `location:"querystring" locationName:"FunctionVersion" min:"1" type:"string"` - - // Optional string. An opaque pagination token returned from a previous ListAliases - // operation. If present, indicates where to continue the listing. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // Optional integer. Specifies the maximum number of aliases to return in response. - // This parameter value must be greater than 0. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` -} - -// String returns the string representation -func (s ListAliasesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListAliasesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAliasesInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "ListAliasesInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.FunctionVersion != nil && len(*s.FunctionVersion) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionVersion", 1)) - } - if s.MaxItems != nil && *s.MaxItems < 1 { - invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s ListAliasesInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionVersion != nil { - v := *s.FunctionVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "FunctionVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Marker != nil { - v := *s.Marker - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Marker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MaxItems != nil { - v := *s.MaxItems - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "MaxItems", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListAliasesResponse -type ListAliasesOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // A list of aliases. - Aliases []UpdateAliasOutput `type:"list"` - - // A string, present if there are more aliases. - NextMarker *string `type:"string"` -} - -// String returns the string representation -func (s ListAliasesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListAliasesOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s ListAliasesOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListAliasesOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.Aliases) > 0 { - v := s.Aliases - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "Aliases", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddFields(v1) - } - ls0.End() - - } - if s.NextMarker != nil { - v := *s.NextMarker - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "NextMarker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListEventSourceMappingsRequest -type ListEventSourceMappingsInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the event source. - // - // * Amazon Kinesis - The ARN of the data stream or a stream consumer. - // - // * Amazon DynamoDB Streams - The ARN of the stream. - // - // * Amazon Simple Queue Service - The ARN of the queue. - EventSourceArn *string `location:"querystring" locationName:"EventSourceArn" type:"string"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it's limited to 64 characters in length. - FunctionName *string `location:"querystring" locationName:"FunctionName" min:"1" type:"string"` - - // A pagination token returned by a previous call. 
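The List* shapes here share one pagination contract: requests carry an optional Marker plus a MaxItems cap, and responses return a NextMarker whenever more results remain. The sketch below walks that loop for ListAliases; fetchPage is a hypothetical stand-in for however the caller actually issues the request, so only the Marker/NextMarker handling comes from the shapes above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/lambda" // assumed vendored path
)

// fetchPage is a hypothetical callback that performs one ListAliases call
// for the given input and returns the decoded response.
type fetchPage func(in *lambda.ListAliasesInput) (*lambda.ListAliasesOutput, error)

// allAliases keeps requesting pages until NextMarker comes back empty.
func allAliases(functionName string, fetch fetchPage) ([]lambda.UpdateAliasOutput, error) {
	max := int64(50)
	in := &lambda.ListAliasesInput{FunctionName: &functionName, MaxItems: &max}

	var aliases []lambda.UpdateAliasOutput
	for {
		out, err := fetch(in)
		if err != nil {
			return nil, err
		}
		aliases = append(aliases, out.Aliases...)
		if out.NextMarker == nil || *out.NextMarker == "" {
			return aliases, nil
		}
		in.Marker = out.NextMarker // resume where the previous page stopped
	}
}

func main() {
	// Exercise the loop with a canned two-page fetcher instead of a live API call.
	page := 0
	fetch := func(in *lambda.ListAliasesInput) (*lambda.ListAliasesOutput, error) {
		page++
		if page == 1 {
			next := "page-2"
			return &lambda.ListAliasesOutput{NextMarker: &next}, nil
		}
		return &lambda.ListAliasesOutput{}, nil
	}
	aliases, err := allAliases("my-function", fetch)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("aliases found:", len(aliases))
}
```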
- Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // The maximum number of event source mappings to return. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` -} - -// String returns the string representation -func (s ListEventSourceMappingsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListEventSourceMappingsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListEventSourceMappingsInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "ListEventSourceMappingsInput"} - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.MaxItems != nil && *s.MaxItems < 1 { - invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListEventSourceMappingsInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.EventSourceArn != nil { - v := *s.EventSourceArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "EventSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Marker != nil { - v := *s.Marker - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Marker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MaxItems != nil { - v := *s.MaxItems - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "MaxItems", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListEventSourceMappingsResponse -type ListEventSourceMappingsOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // A list of event source mappings. - EventSourceMappings []UpdateEventSourceMappingOutput `type:"list"` - - // A pagination token that's returned when the response doesn't contain all - // event source mappings. - NextMarker *string `type:"string"` -} - -// String returns the string representation -func (s ListEventSourceMappingsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListEventSourceMappingsOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s ListEventSourceMappingsOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s ListEventSourceMappingsOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.EventSourceMappings) > 0 { - v := s.EventSourceMappings - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "EventSourceMappings", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddFields(v1) - } - ls0.End() - - } - if s.NextMarker != nil { - v := *s.NextMarker - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "NextMarker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListFunctionsRequest -type ListFunctionsInput struct { - _ struct{} `type:"structure"` - - // Set to ALL to list all published versions. If not specified, only the latest - // unpublished version ARN is returned. - FunctionVersion FunctionVersion `location:"querystring" locationName:"FunctionVersion" type:"string" enum:"true"` - - // Optional string. An opaque pagination token returned from a previous ListFunctions - // operation. If present, indicates where to continue the listing. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // Specify a region (e.g. us-east-2) to only list functions that were created - // in that region, or ALL to include functions replicated from any region. If - // specified, you also must specify the FunctionVersion. - MasterRegion *string `location:"querystring" locationName:"MasterRegion" type:"string"` - - // Optional integer. Specifies the maximum number of AWS Lambda functions to - // return in response. This parameter value must be greater than 0. The absolute - // maximum of AWS Lambda functions that can be returned is 50. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` -} - -// String returns the string representation -func (s ListFunctionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListFunctionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListFunctionsInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "ListFunctionsInput"} - if s.MaxItems != nil && *s.MaxItems < 1 { - invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListFunctionsInput) MarshalFields(e protocol.FieldEncoder) error { - - if len(s.FunctionVersion) > 0 { - v := s.FunctionVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "FunctionVersion", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if s.Marker != nil { - v := *s.Marker - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Marker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MasterRegion != nil { - v := *s.MasterRegion - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "MasterRegion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MaxItems != nil { - v := *s.MaxItems - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "MaxItems", protocol.Int64Value(v), metadata) - } - return nil -} - -// A list of Lambda functions. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListFunctionsResponse -type ListFunctionsOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // A list of Lambda functions. - Functions []UpdateFunctionConfigurationOutput `type:"list"` - - // A string, present if there are more functions. - NextMarker *string `type:"string"` -} - -// String returns the string representation -func (s ListFunctionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListFunctionsOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s ListFunctionsOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListFunctionsOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.Functions) > 0 { - v := s.Functions - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "Functions", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddFields(v1) - } - ls0.End() - - } - if s.NextMarker != nil { - v := *s.NextMarker - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "NextMarker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListLayerVersionsRequest -type ListLayerVersionsInput struct { - _ struct{} `type:"structure"` - - // A runtime identifier. For example, go1.x. - CompatibleRuntime Runtime `location:"querystring" locationName:"CompatibleRuntime" type:"string" enum:"true"` - - // The name of the layer. - // - // LayerName is a required field - LayerName *string `location:"uri" locationName:"LayerName" min:"1" type:"string" required:"true"` - - // A pagination token returned by a previous call. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // The maximum number of versions to return. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` -} - -// String returns the string representation -func (s ListLayerVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListLayerVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListLayerVersionsInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "ListLayerVersionsInput"} - - if s.LayerName == nil { - invalidParams.Add(aws.NewErrParamRequired("LayerName")) - } - if s.LayerName != nil && len(*s.LayerName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("LayerName", 1)) - } - if s.MaxItems != nil && *s.MaxItems < 1 { - invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s ListLayerVersionsInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.CompatibleRuntime) > 0 { - v := s.CompatibleRuntime - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "CompatibleRuntime", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if s.Marker != nil { - v := *s.Marker - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Marker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MaxItems != nil { - v := *s.MaxItems - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "MaxItems", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListLayerVersionsResponse -type ListLayerVersionsOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // A list of versions. - LayerVersions []LayerVersionsListItem `type:"list"` - - // A pagination token returned when the response doesn't contain all versions. - NextMarker *string `type:"string"` -} - -// String returns the string representation -func (s ListLayerVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListLayerVersionsOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s ListLayerVersionsOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListLayerVersionsOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.LayerVersions) > 0 { - v := s.LayerVersions - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "LayerVersions", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddFields(v1) - } - ls0.End() - - } - if s.NextMarker != nil { - v := *s.NextMarker - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "NextMarker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListLayersRequest -type ListLayersInput struct { - _ struct{} `type:"structure"` - - // A runtime identifier. For example, go1.x. - CompatibleRuntime Runtime `location:"querystring" locationName:"CompatibleRuntime" type:"string" enum:"true"` - - // A pagination token returned by a previous call. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // The maximum number of layers to return. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` -} - -// String returns the string representation -func (s ListLayersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListLayersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListLayersInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "ListLayersInput"} - if s.MaxItems != nil && *s.MaxItems < 1 { - invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListLayersInput) MarshalFields(e protocol.FieldEncoder) error { - - if len(s.CompatibleRuntime) > 0 { - v := s.CompatibleRuntime - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "CompatibleRuntime", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if s.Marker != nil { - v := *s.Marker - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Marker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MaxItems != nil { - v := *s.MaxItems - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "MaxItems", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListLayersResponse -type ListLayersOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // A list of function layers. - Layers []LayersListItem `type:"list"` - - // A pagination token returned when the response doesn't contain all layers. - NextMarker *string `type:"string"` -} - -// String returns the string representation -func (s ListLayersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListLayersOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s ListLayersOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListLayersOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.Layers) > 0 { - v := s.Layers - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "Layers", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddFields(v1) - } - ls0.End() - - } - if s.NextMarker != nil { - v := *s.NextMarker - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "NextMarker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListTagsRequest -type ListTagsInput struct { - _ struct{} `type:"structure"` - - // The ARN (Amazon Resource Name) of the function. For more information, see - // Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. - // - // Resource is a required field - Resource *string `location:"uri" locationName:"ARN" type:"string" required:"true"` -} - -// String returns the string representation -func (s ListTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListTagsInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "ListTagsInput"} - - if s.Resource == nil { - invalidParams.Add(aws.NewErrParamRequired("Resource")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListTagsInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.Resource != nil { - v := *s.Resource - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "ARN", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListTagsResponse -type ListTagsOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The list of tags assigned to the function. For more information, see Tagging - // Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. - Tags map[string]string `type:"map"` -} - -// String returns the string representation -func (s ListTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s ListTagsOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListTagsOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.Tags) > 0 { - v := s.Tags - - metadata := protocol.Metadata{} - ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) - ms0.Start() - for k1, v1 := range v { - ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ms0.End() - - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListVersionsByFunctionRequest -type ListVersionsByFunctionInput struct { - _ struct{} `type:"structure"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Optional string. An opaque pagination token returned from a previous ListVersionsByFunction - // operation. If present, indicates where to continue the listing. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // Optional integer. Specifies the maximum number of AWS Lambda function versions - // to return in response. This parameter value must be greater than 0. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` -} - -// String returns the string representation -func (s ListVersionsByFunctionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListVersionsByFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListVersionsByFunctionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "ListVersionsByFunctionInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.MaxItems != nil && *s.MaxItems < 1 { - invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListVersionsByFunctionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Marker != nil { - v := *s.Marker - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Marker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MaxItems != nil { - v := *s.MaxItems - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "MaxItems", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListVersionsByFunctionResponse -type ListVersionsByFunctionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // A string, present if there are more function versions. - NextMarker *string `type:"string"` - - // A list of Lambda function versions. - Versions []UpdateFunctionConfigurationOutput `type:"list"` -} - -// String returns the string representation -func (s ListVersionsByFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListVersionsByFunctionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s ListVersionsByFunctionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s ListVersionsByFunctionOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.NextMarker != nil { - v := *s.NextMarker - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "NextMarker", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.Versions) > 0 { - v := s.Versions - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "Versions", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddFields(v1) - } - ls0.End() - - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PublishLayerVersionRequest -type PublishLayerVersionInput struct { - _ struct{} `type:"structure"` - - // A list of compatible function runtimes (http://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). - // Used for filtering with ListLayers and ListLayerVersions. - CompatibleRuntimes []Runtime `type:"list"` - - // The function layer archive. - // - // Content is a required field - Content *LayerVersionContentInput `type:"structure" required:"true"` - - // The description of the version. - Description *string `type:"string"` - - // The name of the layer. 
- // - // LayerName is a required field - LayerName *string `location:"uri" locationName:"LayerName" min:"1" type:"string" required:"true"` - - // The layer's software license. It can be any of the following: - // - // * An SPDX license identifier (https://spdx.org/licenses/). For example, - // MIT. - // - // * The URL of a license hosted on the internet. For example, https://opensource.org/licenses/MIT. - // - // * The full text of the license. - LicenseInfo *string `type:"string"` -} - -// String returns the string representation -func (s PublishLayerVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PublishLayerVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PublishLayerVersionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "PublishLayerVersionInput"} - - if s.Content == nil { - invalidParams.Add(aws.NewErrParamRequired("Content")) - } - - if s.LayerName == nil { - invalidParams.Add(aws.NewErrParamRequired("LayerName")) - } - if s.LayerName != nil && len(*s.LayerName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("LayerName", 1)) - } - if s.Content != nil { - if err := s.Content.Validate(); err != nil { - invalidParams.AddNested("Content", err.(aws.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s PublishLayerVersionInput) MarshalFields(e protocol.FieldEncoder) error { - - if len(s.CompatibleRuntimes) > 0 { - v := s.CompatibleRuntimes - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "CompatibleRuntimes", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if s.Content != nil { - v := s.Content - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Content", v, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LicenseInfo != nil { - v := *s.LicenseInfo - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LicenseInfo", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PublishLayerVersionResponse -type PublishLayerVersionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The layer's compatible runtimes. - CompatibleRuntimes []Runtime `type:"list"` - - // Details about the layer version. - Content *LayerVersionContentOutput `type:"structure"` - - // The date that the layer version was created, in ISO-8601 format (https://www.w3.org/TR/NOTE-datetime) - // (YYYY-MM-DDThh:mm:ss.sTZD). - CreatedDate *string `type:"string"` - - // The description of the version. - Description *string `type:"string"` - - // The Amazon Resource Name (ARN) of the function layer. - LayerArn *string `min:"1" type:"string"` - - // The ARN of the layer version. 
- LayerVersionArn *string `min:"1" type:"string"` - - // The layer's software license. - LicenseInfo *string `type:"string"` - - // The version number. - Version *int64 `type:"long"` -} - -// String returns the string representation -func (s PublishLayerVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PublishLayerVersionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s PublishLayerVersionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s PublishLayerVersionOutput) MarshalFields(e protocol.FieldEncoder) error { - if len(s.CompatibleRuntimes) > 0 { - v := s.CompatibleRuntimes - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "CompatibleRuntimes", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if s.Content != nil { - v := s.Content - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Content", v, metadata) - } - if s.CreatedDate != nil { - v := *s.CreatedDate - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CreatedDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerArn != nil { - v := *s.LayerArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LayerArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LayerVersionArn != nil { - v := *s.LayerVersionArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LayerVersionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LicenseInfo != nil { - v := *s.LicenseInfo - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LicenseInfo", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Version != nil { - v := *s.Version - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Version", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PublishVersionRequest -type PublishVersionInput struct { - _ struct{} `type:"structure"` - - // The SHA256 hash of the deployment package you want to publish. This provides - // validation on the code you are publishing. If you provide this parameter, - // the value must match the SHA256 of the $LATEST version for the publication - // to succeed. You can use the DryRun parameter of UpdateFunctionCode to verify - // the hash value that will be returned before publishing your new version. - CodeSha256 *string `type:"string"` - - // The description for the version you are publishing. If not provided, AWS - // Lambda copies the description from the $LATEST version. - Description *string `type:"string"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. 
- // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // An optional value you can use to ensure you are updating the latest update - // of the function version or alias. If the RevisionID you pass doesn't match - // the latest RevisionId of the function or alias, it will fail with an error - // message, advising you retrieve the latest function version or alias RevisionID - // using either GetFunction or GetAlias. - RevisionId *string `type:"string"` -} - -// String returns the string representation -func (s PublishVersionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PublishVersionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PublishVersionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "PublishVersionInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s PublishVersionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.CodeSha256 != nil { - v := *s.CodeSha256 - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSha256", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PutFunctionConcurrencyRequest -type PutFunctionConcurrencyInput struct { - _ struct{} `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // The concurrent execution limit reserved for this function. 
- // - // ReservedConcurrentExecutions is a required field - ReservedConcurrentExecutions *int64 `type:"integer" required:"true"` -} - -// String returns the string representation -func (s PutFunctionConcurrencyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutFunctionConcurrencyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutFunctionConcurrencyInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "PutFunctionConcurrencyInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.ReservedConcurrentExecutions == nil { - invalidParams.Add(aws.NewErrParamRequired("ReservedConcurrentExecutions")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s PutFunctionConcurrencyInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.ReservedConcurrentExecutions != nil { - v := *s.ReservedConcurrentExecutions - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "ReservedConcurrentExecutions", protocol.Int64Value(v), metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/Concurrency -type PutFunctionConcurrencyOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The number of concurrent executions reserved for this function. For more - // information, see Managing Concurrency (http://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html). - ReservedConcurrentExecutions *int64 `type:"integer"` -} - -// String returns the string representation -func (s PutFunctionConcurrencyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutFunctionConcurrencyOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s PutFunctionConcurrencyOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s PutFunctionConcurrencyOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.ReservedConcurrentExecutions != nil { - v := *s.ReservedConcurrentExecutions - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "ReservedConcurrentExecutions", protocol.Int64Value(v), metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/RemoveLayerVersionPermissionRequest -type RemoveLayerVersionPermissionInput struct { - _ struct{} `type:"structure"` - - // The name of the layer. - // - // LayerName is a required field - LayerName *string `location:"uri" locationName:"LayerName" min:"1" type:"string" required:"true"` - - // Only update the policy if the revision ID matches the ID specified. Use this - // option to avoid modifying a policy that has changed since you last read it. 
- RevisionId *string `location:"querystring" locationName:"RevisionId" type:"string"` - - // The identifier that was specified when the statement was added. - // - // StatementId is a required field - StatementId *string `location:"uri" locationName:"StatementId" min:"1" type:"string" required:"true"` - - // The version number. - // - // VersionNumber is a required field - VersionNumber *int64 `location:"uri" locationName:"VersionNumber" type:"long" required:"true"` -} - -// String returns the string representation -func (s RemoveLayerVersionPermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemoveLayerVersionPermissionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RemoveLayerVersionPermissionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "RemoveLayerVersionPermissionInput"} - - if s.LayerName == nil { - invalidParams.Add(aws.NewErrParamRequired("LayerName")) - } - if s.LayerName != nil && len(*s.LayerName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("LayerName", 1)) - } - - if s.StatementId == nil { - invalidParams.Add(aws.NewErrParamRequired("StatementId")) - } - if s.StatementId != nil && len(*s.StatementId) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("StatementId", 1)) - } - - if s.VersionNumber == nil { - invalidParams.Add(aws.NewErrParamRequired("VersionNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s RemoveLayerVersionPermissionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.LayerName != nil { - v := *s.LayerName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "LayerName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.StatementId != nil { - v := *s.StatementId - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "StatementId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.VersionNumber != nil { - v := *s.VersionNumber - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "VersionNumber", protocol.Int64Value(v), metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/RemoveLayerVersionPermissionOutput -type RemoveLayerVersionPermissionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s RemoveLayerVersionPermissionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemoveLayerVersionPermissionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s RemoveLayerVersionPermissionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s RemoveLayerVersionPermissionOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/RemovePermissionRequest -type RemovePermissionInput struct { - _ struct{} `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Specify a version or alias to remove permissions from a published version - // of the function. - Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` - - // An optional value you can use to ensure you are updating the latest update - // of the function version or alias. If the RevisionID you pass doesn't match - // the latest RevisionId of the function or alias, it will fail with an error - // message, advising you to retrieve the latest function version or alias RevisionID - // using either GetFunction or GetAlias. - RevisionId *string `location:"querystring" locationName:"RevisionId" type:"string"` - - // Statement ID of the permission to remove. - // - // StatementId is a required field - StatementId *string `location:"uri" locationName:"StatementId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s RemovePermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemovePermissionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RemovePermissionInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "RemovePermissionInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.Qualifier != nil && len(*s.Qualifier) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Qualifier", 1)) - } - - if s.StatementId == nil { - invalidParams.Add(aws.NewErrParamRequired("StatementId")) - } - if s.StatementId != nil && len(*s.StatementId) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("StatementId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s RemovePermissionInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.StatementId != nil { - v := *s.StatementId - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "StatementId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Qualifier != nil { - v := *s.Qualifier - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "Qualifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.QueryTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/RemovePermissionOutput -type RemovePermissionOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s RemovePermissionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemovePermissionOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s RemovePermissionOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s RemovePermissionOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/TagResourceRequest -type TagResourceInput struct { - _ struct{} `type:"structure"` - - // The ARN (Amazon Resource Name) of the Lambda function. For more information, - // see Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. - // - // Resource is a required field - Resource *string `location:"uri" locationName:"ARN" type:"string" required:"true"` - - // The list of tags (key-value pairs) you are assigning to the Lambda function. - // For more information, see Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. - // - // Tags is a required field - Tags map[string]string `type:"map" required:"true"` -} - -// String returns the string representation -func (s TagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"} - - if s.Resource == nil { - invalidParams.Add(aws.NewErrParamRequired("Resource")) - } - - if s.Tags == nil { - invalidParams.Add(aws.NewErrParamRequired("Tags")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s TagResourceInput) MarshalFields(e protocol.FieldEncoder) error { - - if len(s.Tags) > 0 { - v := s.Tags - - metadata := protocol.Metadata{} - ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) - ms0.Start() - for k1, v1 := range v { - ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ms0.End() - - } - if s.Resource != nil { - v := *s.Resource - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "ARN", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/TagResourceOutput -type TagResourceOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TagResourceOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s TagResourceOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s TagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// The function's AWS X-Ray tracing configuration. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/TracingConfig -type TracingConfig struct { - _ struct{} `type:"structure"` - - // The tracing mode. - Mode TracingMode `type:"string" enum:"true"` -} - -// String returns the string representation -func (s TracingConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TracingConfig) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s TracingConfig) MarshalFields(e protocol.FieldEncoder) error { - if len(s.Mode) > 0 { - v := s.Mode - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Mode", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - return nil -} - -// The function's AWS X-Ray tracing configuration. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/TracingConfigResponse -type TracingConfigResponse struct { - _ struct{} `type:"structure"` - - // The tracing mode. - Mode TracingMode `type:"string" enum:"true"` -} - -// String returns the string representation -func (s TracingConfigResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TracingConfigResponse) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s TracingConfigResponse) MarshalFields(e protocol.FieldEncoder) error { - if len(s.Mode) > 0 { - v := s.Mode - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Mode", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UntagResourceRequest -type UntagResourceInput struct { - _ struct{} `type:"structure"` - - // The ARN (Amazon Resource Name) of the function. For more information, see - // Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. 
- // - // Resource is a required field - Resource *string `location:"uri" locationName:"ARN" type:"string" required:"true"` - - // The list of tag keys to be deleted from the function. For more information, - // see Tagging Lambda Functions (http://docs.aws.amazon.com/lambda/latest/dg/tagging.html) - // in the AWS Lambda Developer Guide. - // - // TagKeys is a required field - TagKeys []string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` -} - -// String returns the string representation -func (s UntagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UntagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UntagResourceInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"} - - if s.Resource == nil { - invalidParams.Add(aws.NewErrParamRequired("Resource")) - } - - if s.TagKeys == nil { - invalidParams.Add(aws.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s UntagResourceInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.Resource != nil { - v := *s.Resource - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "ARN", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.TagKeys) > 0 { - v := s.TagKeys - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.QueryTarget, "tagKeys", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UntagResourceOutput -type UntagResourceOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response -} - -// String returns the string representation -func (s UntagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UntagResourceOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s UntagResourceOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s UntagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateAliasRequest -type UpdateAliasInput struct { - _ struct{} `type:"structure"` - - // You can change the description of the alias using this parameter. - Description *string `type:"string"` - - // The name of the lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // Using this parameter you can change the Lambda function version to which - // the alias points. 
- FunctionVersion *string `min:"1" type:"string"` - - // The alias name. - // - // Name is a required field - Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` - - // An optional value you can use to ensure you are updating the latest update - // of the function version or alias. If the RevisionID you pass doesn't match - // the latest RevisionId of the function or alias, it will fail with an error - // message, advising you retrieve the latest function version or alias RevisionID - // using either GetFunction or GetAlias. - RevisionId *string `type:"string"` - - // Specifies an additional version your alias can point to, allowing you to - // dictate what percentage of traffic will invoke each version. For more information, - // see Traffic Shifting Using Aliases (http://docs.aws.amazon.com/lambda/latest/dg/lambda-traffic-shifting-using-aliases.html). - RoutingConfig *AliasRoutingConfiguration `type:"structure"` -} - -// String returns the string representation -func (s UpdateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateAliasInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "UpdateAliasInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.FunctionVersion != nil && len(*s.FunctionVersion) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionVersion", 1)) - } - - if s.Name == nil { - invalidParams.Add(aws.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s UpdateAliasInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionVersion != nil { - v := *s.FunctionVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RoutingConfig != nil { - v := s.RoutingConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "RoutingConfig", v, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Name != nil { - v := *s.Name - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Provides configuration information about a Lambda function version alias. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetAliasOutput -type UpdateAliasOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // Lambda function ARN that is qualified using the alias name as the suffix. - // For example, if you create an alias called BETA that points to a helloworld - // function version, the ARN is arn:aws:lambda:aws-regions:acct-id:function:helloworld:BETA. - AliasArn *string `type:"string"` - - // Alias description. - Description *string `type:"string"` - - // Function version to which the alias points. - FunctionVersion *string `min:"1" type:"string"` - - // Alias name. - Name *string `min:"1" type:"string"` - - // Represents the latest updated revision of the function or alias. - RevisionId *string `type:"string"` - - // Specifies an additional function versions the alias points to, allowing you - // to dictate what percentage of traffic will invoke each version. - RoutingConfig *AliasRoutingConfiguration `type:"structure"` -} - -// String returns the string representation -func (s UpdateAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateAliasOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s UpdateAliasOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s UpdateAliasOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.AliasArn != nil { - v := *s.AliasArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "AliasArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionVersion != nil { - v := *s.FunctionVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Name != nil { - v := *s.Name - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.RoutingConfig != nil { - v := s.RoutingConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "RoutingConfig", v, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateEventSourceMappingRequest -type UpdateEventSourceMappingInput struct { - _ struct{} `type:"structure"` - - // The maximum number of items to retrieve in a single batch. - // - // * Amazon Kinesis - Default 100. Max 10,000. - // - // * Amazon DynamoDB Streams - Default 100. Max 1,000. - // - // * Amazon Simple Queue Service - Default 10. Max 10. - BatchSize *int64 `min:"1" type:"integer"` - - // Disables the event source mapping to pause polling and invocation. - Enabled *bool `type:"boolean"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. 
- // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it's limited to 64 characters in length. - FunctionName *string `min:"1" type:"string"` - - // The identifier of the event source mapping. - // - // UUID is a required field - UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"` -} - -// String returns the string representation -func (s UpdateEventSourceMappingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateEventSourceMappingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateEventSourceMappingInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "UpdateEventSourceMappingInput"} - if s.BatchSize != nil && *s.BatchSize < 1 { - invalidParams.Add(aws.NewErrParamMinValue("BatchSize", 1)) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - - if s.UUID == nil { - invalidParams.Add(aws.NewErrParamRequired("UUID")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s UpdateEventSourceMappingInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.BatchSize != nil { - v := *s.BatchSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "BatchSize", protocol.Int64Value(v), metadata) - } - if s.Enabled != nil { - v := *s.Enabled - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Enabled", protocol.BoolValue(v), metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.UUID != nil { - v := *s.UUID - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "UUID", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// A mapping between an AWS resource and an AWS Lambda function. See CreateEventSourceMapping -// for details. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/GetEventSourceMappingOutput -type UpdateEventSourceMappingOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The maximum number of items to retrieve in a single batch. - BatchSize *int64 `min:"1" type:"integer"` - - // The Amazon Resource Name (ARN) of the event source. - EventSourceArn *string `type:"string"` - - // The ARN of the Lambda function. - FunctionArn *string `type:"string"` - - // The date that the event source mapping was last updated, in Unix time seconds. - LastModified *time.Time `type:"timestamp" timestampFormat:"unix"` - - // The result of the last AWS Lambda invocation of your Lambda function. - LastProcessingResult *string `type:"string"` - - // The state of the event source mapping. It can be one of the following: Creating, - // Enabling, Enabled, Disabling, Disabled, Updating, or Deleting. 
- State *string `type:"string"` - - // The cause of the last state change, either User initiated or Lambda initiated. - StateTransitionReason *string `type:"string"` - - // The identifier of the event source mapping. - UUID *string `type:"string"` -} - -// String returns the string representation -func (s UpdateEventSourceMappingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateEventSourceMappingOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s UpdateEventSourceMappingOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s UpdateEventSourceMappingOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.BatchSize != nil { - v := *s.BatchSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "BatchSize", protocol.Int64Value(v), metadata) - } - if s.EventSourceArn != nil { - v := *s.EventSourceArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "EventSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionArn != nil { - v := *s.FunctionArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LastModified != nil { - v := *s.LastModified - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LastModified", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata) - } - if s.LastProcessingResult != nil { - v := *s.LastProcessingResult - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LastProcessingResult", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.State != nil { - v := *s.State - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.StateTransitionReason != nil { - v := *s.StateTransitionReason - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "StateTransitionReason", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.UUID != nil { - v := *s.UUID - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "UUID", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateFunctionCodeRequest -type UpdateFunctionCodeInput struct { - _ struct{} `type:"structure"` - - // This boolean parameter can be used to test your request to AWS Lambda to - // update the Lambda function and publish a version as an atomic operation. - // It will do all necessary computation and validation of your code but will - // not upload it or a publish a version. Each time this operation is invoked, - // the CodeSha256 hash value of the provided code will also be computed and - // returned in the response. - DryRun *bool `type:"boolean"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. 
If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // This boolean parameter can be used to request AWS Lambda to update the Lambda - // function and publish a version as an atomic operation. - Publish *bool `type:"boolean"` - - // An optional value you can use to ensure you are updating the latest update - // of the function version or alias. If the RevisionID you pass doesn't match - // the latest RevisionId of the function or alias, it will fail with an error - // message, advising you to retrieve the latest function version or alias RevisionID - // using either using using either GetFunction or GetAlias. - RevisionId *string `type:"string"` - - // Amazon S3 bucket name where the .zip file containing your deployment package - // is stored. This bucket must reside in the same AWS Region where you are creating - // the Lambda function. - S3Bucket *string `min:"3" type:"string"` - - // The Amazon S3 object (the deployment package) key name you want to upload. - S3Key *string `min:"1" type:"string"` - - // The Amazon S3 object (the deployment package) version you want to upload. - S3ObjectVersion *string `min:"1" type:"string"` - - // The contents of your zip file containing your deployment package. If you - // are using the web API directly, the contents of the zip file must be base64-encoded. - // If you are using the AWS SDKs or the AWS CLI, the SDKs or CLI will do the - // encoding for you. For more information about creating a .zip file, see Execution - // Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html). - // - // ZipFile is automatically base64 encoded/decoded by the SDK. - ZipFile []byte `type:"blob"` -} - -// String returns the string representation -func (s UpdateFunctionCodeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFunctionCodeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateFunctionCodeInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "UpdateFunctionCodeInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.S3Bucket != nil && len(*s.S3Bucket) < 3 { - invalidParams.Add(aws.NewErrParamMinLen("S3Bucket", 3)) - } - if s.S3Key != nil && len(*s.S3Key) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("S3Key", 1)) - } - if s.S3ObjectVersion != nil && len(*s.S3ObjectVersion) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("S3ObjectVersion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s UpdateFunctionCodeInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.DryRun != nil { - v := *s.DryRun - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "DryRun", protocol.BoolValue(v), metadata) - } - if s.Publish != nil { - v := *s.Publish - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Publish", protocol.BoolValue(v), metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.S3Bucket != nil { - v := *s.S3Bucket - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3Bucket", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.S3Key != nil { - v := *s.S3Key - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.S3ObjectVersion != nil { - v := *s.S3ObjectVersion - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "S3ObjectVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.ZipFile != nil { - v := s.ZipFile - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "ZipFile", protocol.QuotedValue{ValueMarshaler: protocol.BytesValue(v)}, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateFunctionConfigurationRequest -type UpdateFunctionConfigurationInput struct { - _ struct{} `type:"structure"` - - // A dead letter queue configuration that specifies the queue or topic where - // Lambda sends asynchronous events when they fail processing. For more information, - // see Dead Letter Queues (http://docs.aws.amazon.com/lambda/latest/dg/dlq.html). - DeadLetterConfig *DeadLetterConfig `type:"structure"` - - // A short user-defined function description. AWS Lambda does not use this value. - // Assign a meaningful description as you see fit. - Description *string `type:"string"` - - // The parent object that contains your environment's configuration settings. - Environment *Environment `type:"structure"` - - // The name of the Lambda function. - // - // Name formats - // - // * Function name - MyFunction. - // - // * Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. - // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies only to the full ARN. If you specify only the - // function name, it is limited to 64 characters in length. - // - // FunctionName is a required field - FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - - // The function that Lambda calls to begin executing your function. For Node.js, - // it is the module-name.export value in your function. - Handler *string `type:"string"` - - // The Amazon Resource Name (ARN) of the KMS key used to encrypt your function's - // environment variables. If you elect to use the AWS Lambda default service - // key, pass in an empty string ("") for this parameter. 
- KMSKeyArn *string `type:"string"` - - // A list of function layers (http://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html) - // to add to the function's execution environment. - Layers []string `type:"list"` - - // The amount of memory, in MB, your Lambda function is given. AWS Lambda uses - // this memory size to infer the amount of CPU allocated to your function. Your - // function use-case determines your CPU and memory requirements. For example, - // a database operation might need less memory compared to an image processing - // function. The default value is 128 MB. The value must be a multiple of 64 - // MB. - MemorySize *int64 `min:"128" type:"integer"` - - // An optional value you can use to ensure you are updating the latest update - // of the function version or alias. If the RevisionID you pass doesn't match - // the latest RevisionId of the function or alias, it will fail with an error - // message, advising you to retrieve the latest function version or alias RevisionID - // using either GetFunction or GetAlias. - RevisionId *string `type:"string"` - - // The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when - // it executes your function. - Role *string `type:"string"` - - // The runtime version for the function. - Runtime Runtime `type:"string" enum:"true"` - - // The amount of time that Lambda allows a function to run before terminating - // it. The default is 3 seconds. The maximum allowed value is 900 seconds. - Timeout *int64 `min:"1" type:"integer"` - - // Set Mode to Active to sample and trace a subset of incoming requests with - // AWS X-Ray. - TracingConfig *TracingConfig `type:"structure"` - - // Specify security groups and subnets in a VPC to which your Lambda function - // needs access. - VpcConfig *VpcConfig `type:"structure"` -} - -// String returns the string representation -func (s UpdateFunctionConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFunctionConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateFunctionConfigurationInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "UpdateFunctionConfigurationInput"} - - if s.FunctionName == nil { - invalidParams.Add(aws.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("FunctionName", 1)) - } - if s.MemorySize != nil && *s.MemorySize < 128 { - invalidParams.Add(aws.NewErrParamMinValue("MemorySize", 128)) - } - if s.Timeout != nil && *s.Timeout < 1 { - invalidParams.Add(aws.NewErrParamMinValue("Timeout", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s UpdateFunctionConfigurationInput) MarshalFields(e protocol.FieldEncoder) error { - - if s.DeadLetterConfig != nil { - v := s.DeadLetterConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "DeadLetterConfig", v, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Environment != nil { - v := s.Environment - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Environment", v, metadata) - } - if s.Handler != nil { - v := *s.Handler - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Handler", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.KMSKeyArn != nil { - v := *s.KMSKeyArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "KMSKeyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.Layers) > 0 { - v := s.Layers - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "Layers", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if s.MemorySize != nil { - v := *s.MemorySize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "MemorySize", protocol.Int64Value(v), metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Role != nil { - v := *s.Role - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.Runtime) > 0 { - v := s.Runtime - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Runtime", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if s.Timeout != nil { - v := *s.Timeout - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Timeout", protocol.Int64Value(v), metadata) - } - if s.TracingConfig != nil { - v := s.TracingConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "TracingConfig", v, metadata) - } - if s.VpcConfig != nil { - v := s.VpcConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "VpcConfig", v, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.PathTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -// A Lambda function's configuration settings. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateFunctionCodeOutput -type UpdateFunctionConfigurationOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The SHA256 hash of the function's deployment package. - CodeSha256 *string `type:"string"` - - // The size of the function's deployment package in bytes. - CodeSize *int64 `type:"long"` - - // The function's dead letter queue. - DeadLetterConfig *DeadLetterConfig `type:"structure"` - - // The function's description. - Description *string `type:"string"` - - // The function's environment variables. - Environment *EnvironmentResponse `type:"structure"` - - // The function's Amazon Resource Name. 
- FunctionArn *string `type:"string"` - - // The name of the function. - FunctionName *string `min:"1" type:"string"` - - // The function Lambda calls to begin executing your function. - Handler *string `type:"string"` - - // The KMS key used to encrypt the function's environment variables. Only returned - // if you've configured a customer managed CMK. - KMSKeyArn *string `type:"string"` - - // The date and time that the function was last updated, in ISO-8601 format - // (https://www.w3.org/TR/NOTE-datetime) (YYYY-MM-DDThh:mm:ss.sTZD). - LastModified *string `type:"string"` - - // A list of function layers (http://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html). - Layers []Layer `type:"list"` - - // The ARN of the master function. - MasterArn *string `type:"string"` - - // The memory allocated to the function - MemorySize *int64 `min:"128" type:"integer"` - - // Represents the latest updated revision of the function or alias. - RevisionId *string `type:"string"` - - // The function's execution role. - Role *string `type:"string"` - - // The runtime environment for the Lambda function. - Runtime Runtime `type:"string" enum:"true"` - - // The amount of time that Lambda allows a function to run before terminating - // it. - Timeout *int64 `min:"1" type:"integer"` - - // The function's AWS X-Ray tracing configuration. - TracingConfig *TracingConfigResponse `type:"structure"` - - // The version of the Lambda function. - Version *string `min:"1" type:"string"` - - // The function's networking configuration. - VpcConfig *VpcConfigResponse `type:"structure"` -} - -// String returns the string representation -func (s UpdateFunctionConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateFunctionConfigurationOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s UpdateFunctionConfigurationOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s UpdateFunctionConfigurationOutput) MarshalFields(e protocol.FieldEncoder) error { - if s.CodeSha256 != nil { - v := *s.CodeSha256 - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSha256", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.CodeSize != nil { - v := *s.CodeSize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "CodeSize", protocol.Int64Value(v), metadata) - } - if s.DeadLetterConfig != nil { - v := s.DeadLetterConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "DeadLetterConfig", v, metadata) - } - if s.Description != nil { - v := *s.Description - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Environment != nil { - v := s.Environment - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "Environment", v, metadata) - } - if s.FunctionArn != nil { - v := *s.FunctionArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.FunctionName != nil { - v := *s.FunctionName - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "FunctionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Handler != nil { - v := *s.Handler - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Handler", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.KMSKeyArn != nil { - v := *s.KMSKeyArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "KMSKeyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.LastModified != nil { - v := *s.LastModified - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "LastModified", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.Layers) > 0 { - v := s.Layers - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "Layers", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddFields(v1) - } - ls0.End() - - } - if s.MasterArn != nil { - v := *s.MasterArn - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "MasterArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.MemorySize != nil { - v := *s.MemorySize - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "MemorySize", protocol.Int64Value(v), metadata) - } - if s.RevisionId != nil { - v := *s.RevisionId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.Role != nil { - v := *s.Role - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if len(s.Runtime) > 0 { - v := s.Runtime - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Runtime", protocol.QuotedValue{ValueMarshaler: v}, metadata) - } - if s.Timeout != nil { - v := *s.Timeout - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Timeout", protocol.Int64Value(v), metadata) - } - if s.TracingConfig != nil { - v := s.TracingConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "TracingConfig", v, metadata) - } - if s.Version != 
nil { - v := *s.Version - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Version", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - if s.VpcConfig != nil { - v := s.VpcConfig - - metadata := protocol.Metadata{} - e.SetFields(protocol.BodyTarget, "VpcConfig", v, metadata) - } - return nil -} - -// The VPC security groups and subnets attached to a Lambda function. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/VpcConfig -type VpcConfig struct { - _ struct{} `type:"structure"` - - // A list of VPC security groups IDs. - SecurityGroupIds []string `type:"list"` - - // A list of VPC subnet IDs. - SubnetIds []string `type:"list"` -} - -// String returns the string representation -func (s VpcConfig) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VpcConfig) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. -func (s VpcConfig) MarshalFields(e protocol.FieldEncoder) error { - if len(s.SecurityGroupIds) > 0 { - v := s.SecurityGroupIds - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "SecurityGroupIds", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if len(s.SubnetIds) > 0 { - v := s.SubnetIds - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "SubnetIds", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - return nil -} - -// The VPC security groups and subnets attached to a Lambda function. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/VpcConfigResponse -type VpcConfigResponse struct { - _ struct{} `type:"structure"` - - // A list of VPC security groups IDs. - SecurityGroupIds []string `type:"list"` - - // A list of VPC subnet IDs. - SubnetIds []string `type:"list"` - - // The ID of the VPC. - VpcId *string `type:"string"` -} - -// String returns the string representation -func (s VpcConfigResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VpcConfigResponse) GoString() string { - return s.String() -} - -// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s VpcConfigResponse) MarshalFields(e protocol.FieldEncoder) error { - if len(s.SecurityGroupIds) > 0 { - v := s.SecurityGroupIds - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "SecurityGroupIds", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if len(s.SubnetIds) > 0 { - v := s.SubnetIds - - metadata := protocol.Metadata{} - ls0 := e.List(protocol.BodyTarget, "SubnetIds", metadata) - ls0.Start() - for _, v1 := range v { - ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) - } - ls0.End() - - } - if s.VpcId != nil { - v := *s.VpcId - - metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "VpcId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) - } - return nil -} - -type EventSourcePosition string - -// Enum values for EventSourcePosition -const ( - EventSourcePositionTrimHorizon EventSourcePosition = "TRIM_HORIZON" - EventSourcePositionLatest EventSourcePosition = "LATEST" - EventSourcePositionAtTimestamp EventSourcePosition = "AT_TIMESTAMP" -) - -func (enum EventSourcePosition) MarshalValue() (string, error) { - return string(enum), nil -} - -func (enum EventSourcePosition) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, enum...), nil -} - -type FunctionVersion string - -// Enum values for FunctionVersion -const ( - FunctionVersionAll FunctionVersion = "ALL" -) - -func (enum FunctionVersion) MarshalValue() (string, error) { - return string(enum), nil -} - -func (enum FunctionVersion) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, enum...), nil -} - -type InvocationType string - -// Enum values for InvocationType -const ( - InvocationTypeEvent InvocationType = "Event" - InvocationTypeRequestResponse InvocationType = "RequestResponse" - InvocationTypeDryRun InvocationType = "DryRun" -) - -func (enum InvocationType) MarshalValue() (string, error) { - return string(enum), nil -} - -func (enum InvocationType) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, enum...), nil -} - -type LogType string - -// Enum values for LogType -const ( - LogTypeNone LogType = "None" - LogTypeTail LogType = "Tail" -) - -func (enum LogType) MarshalValue() (string, error) { - return string(enum), nil -} - -func (enum LogType) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, enum...), nil -} - -type Runtime string - -// Enum values for Runtime -const ( - RuntimeNodejs Runtime = "nodejs" - RuntimeNodejs43 Runtime = "nodejs4.3" - RuntimeNodejs610 Runtime = "nodejs6.10" - RuntimeNodejs810 Runtime = "nodejs8.10" - RuntimeJava8 Runtime = "java8" - RuntimePython27 Runtime = "python2.7" - RuntimePython36 Runtime = "python3.6" - RuntimePython37 Runtime = "python3.7" - RuntimeDotnetcore10 Runtime = "dotnetcore1.0" - RuntimeDotnetcore20 Runtime = "dotnetcore2.0" - RuntimeDotnetcore21 Runtime = "dotnetcore2.1" - RuntimeNodejs43Edge Runtime = "nodejs4.3-edge" - RuntimeGo1X Runtime = "go1.x" - RuntimeRuby25 Runtime = "ruby2.5" - RuntimeProvided Runtime = "provided" -) - -func (enum Runtime) MarshalValue() (string, error) { - return string(enum), nil -} - -func (enum Runtime) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, enum...), nil -} - -type ThrottleReason string - -// Enum values for ThrottleReason -const ( - ThrottleReasonConcurrentInvocationLimitExceeded ThrottleReason = 
"ConcurrentInvocationLimitExceeded" - ThrottleReasonFunctionInvocationRateLimitExceeded ThrottleReason = "FunctionInvocationRateLimitExceeded" - ThrottleReasonReservedFunctionConcurrentInvocationLimitExceeded ThrottleReason = "ReservedFunctionConcurrentInvocationLimitExceeded" - ThrottleReasonReservedFunctionInvocationRateLimitExceeded ThrottleReason = "ReservedFunctionInvocationRateLimitExceeded" - ThrottleReasonCallerRateLimitExceeded ThrottleReason = "CallerRateLimitExceeded" -) - -func (enum ThrottleReason) MarshalValue() (string, error) { - return string(enum), nil -} - -func (enum ThrottleReason) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, enum...), nil -} - -type TracingMode string - -// Enum values for TracingMode -const ( - TracingModeActive TracingMode = "Active" - TracingModePassThrough TracingMode = "PassThrough" -) - -func (enum TracingMode) MarshalValue() (string, error) { - return string(enum), nil -} - -func (enum TracingMode) MarshalValueBuf(b []byte) ([]byte, error) { - b = b[0:0] - return append(b, enum...), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/doc.go deleted file mode 100644 index 5da2c809..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package lambda provides the client and types for making API -// requests to AWS Lambda. -// -// Overview -// -// This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides -// additional information. For the service overview, see What is AWS Lambda -// (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html), and for information -// about how the service works, see AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html) -// in the AWS Lambda Developer Guide. -// -// See https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31 for more information on this service. -// -// See lambda package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/lambda/ -// -// Using the Client -// -// To AWS Lambda with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Lambda client Lambda for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/lambda/#New -package lambda diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/errors.go deleted file mode 100644 index 4e12ca08..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/errors.go +++ /dev/null @@ -1,174 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package lambda - -const ( - - // ErrCodeCodeStorageExceededException for service response error code - // "CodeStorageExceededException". - // - // You have exceeded your maximum total code size per account. 
Limits (http://docs.aws.amazon.com/lambda/latest/dg/limits.html) - ErrCodeCodeStorageExceededException = "CodeStorageExceededException" - - // ErrCodeEC2AccessDeniedException for service response error code - // "EC2AccessDeniedException". - // - // Need additional permissions to configure VPC settings. - ErrCodeEC2AccessDeniedException = "EC2AccessDeniedException" - - // ErrCodeEC2ThrottledException for service response error code - // "EC2ThrottledException". - // - // AWS Lambda was throttled by Amazon EC2 during Lambda function initialization - // using the execution role provided for the Lambda function. - ErrCodeEC2ThrottledException = "EC2ThrottledException" - - // ErrCodeEC2UnexpectedException for service response error code - // "EC2UnexpectedException". - // - // AWS Lambda received an unexpected EC2 client exception while setting up for - // the Lambda function. - ErrCodeEC2UnexpectedException = "EC2UnexpectedException" - - // ErrCodeENILimitReachedException for service response error code - // "ENILimitReachedException". - // - // AWS Lambda was not able to create an Elastic Network Interface (ENI) in the - // VPC, specified as part of Lambda function configuration, because the limit - // for network interfaces has been reached. - ErrCodeENILimitReachedException = "ENILimitReachedException" - - // ErrCodeInvalidParameterValueException for service response error code - // "InvalidParameterValueException". - // - // One of the parameters in the request is invalid. For example, if you provided - // an IAM role for AWS Lambda to assume in the CreateFunction or the UpdateFunctionConfiguration - // API, that AWS Lambda is unable to assume you will get this exception. - ErrCodeInvalidParameterValueException = "InvalidParameterValueException" - - // ErrCodeInvalidRequestContentException for service response error code - // "InvalidRequestContentException". - // - // The request body could not be parsed as JSON. - ErrCodeInvalidRequestContentException = "InvalidRequestContentException" - - // ErrCodeInvalidRuntimeException for service response error code - // "InvalidRuntimeException". - // - // The runtime or runtime version specified is not supported. - ErrCodeInvalidRuntimeException = "InvalidRuntimeException" - - // ErrCodeInvalidSecurityGroupIDException for service response error code - // "InvalidSecurityGroupIDException". - // - // The Security Group ID provided in the Lambda function VPC configuration is - // invalid. - ErrCodeInvalidSecurityGroupIDException = "InvalidSecurityGroupIDException" - - // ErrCodeInvalidSubnetIDException for service response error code - // "InvalidSubnetIDException". - // - // The Subnet ID provided in the Lambda function VPC configuration is invalid. - ErrCodeInvalidSubnetIDException = "InvalidSubnetIDException" - - // ErrCodeInvalidZipFileException for service response error code - // "InvalidZipFileException". - // - // AWS Lambda could not unzip the deployment package. - ErrCodeInvalidZipFileException = "InvalidZipFileException" - - // ErrCodeKMSAccessDeniedException for service response error code - // "KMSAccessDeniedException". - // - // Lambda was unable to decrypt the environment variables because KMS access - // was denied. Check the Lambda function's KMS permissions. - ErrCodeKMSAccessDeniedException = "KMSAccessDeniedException" - - // ErrCodeKMSDisabledException for service response error code - // "KMSDisabledException". - // - // Lambda was unable to decrypt the environment variables because the KMS key - // used is disabled. 
Check the Lambda function's KMS key settings. - ErrCodeKMSDisabledException = "KMSDisabledException" - - // ErrCodeKMSInvalidStateException for service response error code - // "KMSInvalidStateException". - // - // Lambda was unable to decrypt the environment variables because the KMS key - // used is in an invalid state for Decrypt. Check the function's KMS key settings. - ErrCodeKMSInvalidStateException = "KMSInvalidStateException" - - // ErrCodeKMSNotFoundException for service response error code - // "KMSNotFoundException". - // - // Lambda was unable to decrypt the environment variables because the KMS key - // was not found. Check the function's KMS key settings. - ErrCodeKMSNotFoundException = "KMSNotFoundException" - - // ErrCodePolicyLengthExceededException for service response error code - // "PolicyLengthExceededException". - // - // Lambda function access policy is limited to 20 KB. - ErrCodePolicyLengthExceededException = "PolicyLengthExceededException" - - // ErrCodePreconditionFailedException for service response error code - // "PreconditionFailedException". - // - // The RevisionId provided does not match the latest RevisionId for the Lambda - // function or alias. Call the GetFunction or the GetAlias API to retrieve the - // latest RevisionId for your resource. - ErrCodePreconditionFailedException = "PreconditionFailedException" - - // ErrCodeRequestTooLargeException for service response error code - // "RequestTooLargeException". - // - // The request payload exceeded the Invoke request body JSON input limit. For - // more information, see Limits (http://docs.aws.amazon.com/lambda/latest/dg/limits.html). - ErrCodeRequestTooLargeException = "RequestTooLargeException" - - // ErrCodeResourceConflictException for service response error code - // "ResourceConflictException". - // - // The resource already exists. - ErrCodeResourceConflictException = "ResourceConflictException" - - // ErrCodeResourceInUseException for service response error code - // "ResourceInUseException". - // - // The operation conflicts with the resource's availability. For example, you - // attempted to update an EventSoure Mapping in CREATING, or tried to delete - // a EventSoure mapping currently in the UPDATING state. - ErrCodeResourceInUseException = "ResourceInUseException" - - // ErrCodeResourceNotFoundException for service response error code - // "ResourceNotFoundException". - // - // The resource (for example, a Lambda function or access policy statement) - // specified in the request does not exist. - ErrCodeResourceNotFoundException = "ResourceNotFoundException" - - // ErrCodeServiceException for service response error code - // "ServiceException". - // - // The AWS Lambda service encountered an internal error. - ErrCodeServiceException = "ServiceException" - - // ErrCodeSubnetIPAddressLimitReachedException for service response error code - // "SubnetIPAddressLimitReachedException". - // - // AWS Lambda was not able to set up VPC access for the Lambda function because - // one or more configured subnets has no available IP addresses. - ErrCodeSubnetIPAddressLimitReachedException = "SubnetIPAddressLimitReachedException" - - // ErrCodeTooManyRequestsException for service response error code - // "TooManyRequestsException". - // - // Request throughput limit exceeded - ErrCodeTooManyRequestsException = "TooManyRequestsException" - - // ErrCodeUnsupportedMediaTypeException for service response error code - // "UnsupportedMediaTypeException". 
- // - // The content type of the Invoke request body is not JSON. - ErrCodeUnsupportedMediaTypeException = "UnsupportedMediaTypeException" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/service.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/service.go deleted file mode 100644 index 6c9a8865..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lambda/service.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package lambda - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" -) - -// Lambda provides the API operation methods for making requests to -// AWS Lambda. See this package's package overview docs -// for details on the service. -// -// Lambda methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type Lambda struct { - *aws.Client -} - -// Used for custom client initialization logic -var initClient func(*Lambda) - -// Used for custom request initialization logic -var initRequest func(*Lambda, *aws.Request) - -// Service information constants -const ( - ServiceName = "lambda" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the Lambda client with a config. -// -// Example: -// // Create a Lambda client from just a config. -// svc := lambda.New(myConfig) -func New(config aws.Config) *Lambda { - var signingName string - signingRegion := config.Region - - svc := &Lambda{ - Client: aws.NewClient( - config, - aws.Metadata{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - APIVersion: "2015-03-31", - }, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc) - } - - return svc -} - -// newRequest creates a new request for a Lambda operation and runs any -// custom request initialization. -func (c *Lambda) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(c, req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api.go deleted file mode 100644 index fad7e8b6..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api.go +++ /dev/null @@ -1,1832 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/awsutil" -) - -const opAssumeRole = "AssumeRole" - -// AssumeRoleRequest is a API request type for the AssumeRole API operation. -type AssumeRoleRequest struct { - *aws.Request - Input *AssumeRoleInput - Copy func(*AssumeRoleInput) AssumeRoleRequest -} - -// Send marshals and sends the AssumeRole API request. 
-func (r AssumeRoleRequest) Send() (*AssumeRoleOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*AssumeRoleOutput), nil -} - -// AssumeRoleRequest returns a request value for making API operation for -// AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) that you can use to access -// AWS resources that you might not normally have access to. Typically, you -// use AssumeRole for cross-account access or federation. For a comparison of -// AssumeRole with the other APIs that produce temporary credentials, see Requesting -// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// Important: You cannot call AssumeRole by using AWS root account credentials; -// access is denied. You must use credentials for an IAM user or an IAM role -// to call AssumeRole. -// -// For cross-account access, imagine that you own multiple accounts and need -// to access resources in each account. You could create long-term credentials -// in each account to access those resources. However, managing all those credentials -// and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account and -// then use temporary security credentials to access all the other accounts -// by assuming roles in those accounts. For more information about roles, see -// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) -// in the IAM User Guide. -// -// For federation, you can, for example, grant single sign-on access to the -// AWS Management Console. If you already have an identity and authentication -// system in your corporate network, you don't have to recreate user identities -// in AWS in order to grant those user identities access to AWS. Instead, after -// a user has been authenticated, you call AssumeRole (and specify the role -// with the appropriate permissions) to get temporary security credentials for -// that user. With those temporary security credentials, you construct a sign-in -// URL that users can use to access the console. For more information, see Common -// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) -// in the IAM User Guide. -// -// By default, the temporary security credentials created by AssumeRole last -// for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. 
For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: you cannot call -// the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) -// in the IAM User Guide. -// -// To assume a role, your AWS account must be trusted by the role. The trust -// relationship is defined in the role's trust policy when the role is created. -// That trust policy states which accounts are allowed to delegate access to -// this account's role. -// -// The user who wants to access the role must also have permissions delegated -// from the role's administrator. If the user is in a different account than -// the role, then the user's administrator must attach a policy that allows -// the user to call AssumeRole on the ARN of the role in the other account. -// If the user is in the same account as the role, then you can either attach -// a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy. -// In this case, the trust policy acts as the only resource-based policy in -// IAM, and users in the same account as the role do not need explicit permission -// to assume the role. For more information about trust policies and resource-based -// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. -// -// Using MFA with AssumeRole -// -// You can optionally include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios in which -// you want to make sure that the user who is assuming the role has been authenticated -// using an AWS MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication; if the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication -// might look like the following example. -// -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} -// -// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. 
-// -// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode -// parameters. The SerialNumber value identifies the user's hardware or virtual -// MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA devices produces. -// -// // Example sending a request using the AssumeRoleRequest method. -// req := client.AssumeRoleRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) AssumeRoleRequest { - op := &aws.Operation{ - Name: opAssumeRole, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleInput{} - } - - output := &AssumeRoleOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return AssumeRoleRequest{Request: req, Input: input, Copy: c.AssumeRoleRequest} -} - -const opAssumeRoleWithSAML = "AssumeRoleWithSAML" - -// AssumeRoleWithSAMLRequest is a API request type for the AssumeRoleWithSAML API operation. -type AssumeRoleWithSAMLRequest struct { - *aws.Request - Input *AssumeRoleWithSAMLInput - Copy func(*AssumeRoleWithSAMLInput) AssumeRoleWithSAMLRequest -} - -// Send marshals and sends the AssumeRoleWithSAML API request. -func (r AssumeRoleWithSAMLRequest) Send() (*AssumeRoleWithSAMLOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*AssumeRoleWithSAMLOutput), nil -} - -// AssumeRoleWithSAMLRequest returns a request value for making API operation for -// AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based AWS access -// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of -// an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to AWS services. -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. You can provide -// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour -// to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. 
The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any AWS service with the following exception: you cannot -// call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by the intersection of both the access policy -// of the role that is being assumed, and the policy that you pass. This means -// that both policies must grant the permission for the action to be allowed. -// This gives you a way to further restrict the permissions for the resulting -// temporary security credentials. You cannot use the passed policy to grant -// permissions that are in excess of those allowed by the access policy of the -// role that is being assumed. For more information, see Permissions for AssumeRole, -// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) -// in the IAM User Guide. -// -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider, and create -// an IAM role that specifies this SAML provider in its trust policy. -// -// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. -// The identity of the caller is validated by using keys in the metadata document -// that is uploaded for the SAML provider entity for your identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail -// logs. The entry includes the value in the NameID element of the SAML assertion. -// We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the Persistent -// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). -// -// For more information, see the following resources: -// -// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// -// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. 
-// -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req := client.AssumeRoleWithSAMLRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) AssumeRoleWithSAMLRequest { - op := &aws.Operation{ - Name: opAssumeRoleWithSAML, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithSAMLInput{} - } - - output := &AssumeRoleWithSAMLOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return AssumeRoleWithSAMLRequest{Request: req, Input: input, Copy: c.AssumeRoleWithSAMLRequest} -} - -const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" - -// AssumeRoleWithWebIdentityRequest is a API request type for the AssumeRoleWithWebIdentity API operation. -type AssumeRoleWithWebIdentityRequest struct { - *aws.Request - Input *AssumeRoleWithWebIdentityInput - Copy func(*AssumeRoleWithWebIdentityInput) AssumeRoleWithWebIdentityRequest -} - -// Send marshals and sends the AssumeRoleWithWebIdentity API request. -func (r AssumeRoleWithWebIdentityRequest) Send() (*AssumeRoleWithWebIdentityOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*AssumeRoleWithWebIdentityOutput), nil -} - -// AssumeRoleWithWebIdentityRequest returns a request value for making API operation for -// AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider, such as Amazon -// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible -// identity provider. -// -// For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely -// identify a user and supply the user with a consistent identity throughout -// the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview -// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the AWS SDK for iOS Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of AWS security -// credentials. Therefore, you can distribute an application (for example, on -// mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application, and without deploying server-based -// proxy services that use long-term AWS credentials. Instead, the identity -// of the caller is validated by using a token from the web identity provider. -// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce -// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. 
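As a rough companion to the AssumeRoleWithSAML description above, the sketch below exchanges a base64-encoded SAML assertion for temporary credentials. The provider and role ARNs are placeholders, and as the doc comment notes, the call itself is not signed with AWS credentials.

```go
package stsexample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeRoleWithSAML sketches the SAML federation call. samlAssertion is the
// base64-encoded response produced by the IdP after authenticating the user.
func assumeRoleWithSAML(client *sts.STS, samlAssertion string) (*sts.Credentials, error) {
	req := client.AssumeRoleWithSAMLRequest(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-example-role"),
		SAMLAssertion: aws.String(samlAssertion),
		// The effective session length is the shorter of DurationSeconds and the
		// assertion's SessionNotOnOrAfter value.
		DurationSeconds: aws.Int64(3600),
	})
	resp, err := req.Send()
	if err != nil {
		return nil, err
	}
	return resp.Credentials, nil
}
```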
-// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to AWS service APIs. -// -// By default, the temporary security credentials created by AssumeRoleWithWebIdentity -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// The temporary security credentials created by AssumeRoleWithWebIdentity can -// be used to make API calls to any AWS service with the following exception: -// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) -// in the IAM User Guide. -// -// Before your application can call AssumeRoleWithWebIdentity, you must have -// an identity token from a supported identity provider and create a role that -// the application can assume. The role that your application assumes must trust -// the identity provider that is associated with the identity token. In other -// words, the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail -// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided Web Identity Token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you -// could instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). 
-// -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity -// API, see the following resources: -// -// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). -// This interactive website lets you walk through the process of authenticating -// via Login with Amazon, Facebook, or Google, getting temporary security -// credentials, and then using those credentials to make a request to AWS. -// -// -// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android -// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample -// apps that show how to invoke the identity providers, and then how to use -// the information from these providers to get and use temporary security -// credentials. -// -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. -// -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req := client.AssumeRoleWithWebIdentityRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) AssumeRoleWithWebIdentityRequest { - op := &aws.Operation{ - Name: opAssumeRoleWithWebIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithWebIdentityInput{} - } - - output := &AssumeRoleWithWebIdentityOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return AssumeRoleWithWebIdentityRequest{Request: req, Input: input, Copy: c.AssumeRoleWithWebIdentityRequest} -} - -const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" - -// DecodeAuthorizationMessageRequest is a API request type for the DecodeAuthorizationMessage API operation. -type DecodeAuthorizationMessageRequest struct { - *aws.Request - Input *DecodeAuthorizationMessageInput - Copy func(*DecodeAuthorizationMessageInput) DecodeAuthorizationMessageRequest -} - -// Send marshals and sends the DecodeAuthorizationMessage API request. -func (r DecodeAuthorizationMessageRequest) Send() (*DecodeAuthorizationMessageOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*DecodeAuthorizationMessageOutput), nil -} - -// DecodeAuthorizationMessageRequest returns a request value for making API operation for -// AWS Security Token Service. -// -// Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an AWS request. -// -// For example, if a user is not authorized to perform an action that he or -// she has requested, the request returns a Client.UnauthorizedOperation response -// (an HTTP 403 response). 
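Following the AssumeRoleWithWebIdentity material above, here is a minimal sketch of exchanging an OpenID Connect ID token for temporary credentials; the role ARN and session name are placeholders, and the token would come from the web identity provider after the user signs in.

```go
package stsexample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeRoleWithWebIdentity sketches the web identity federation call. The
// request is unsigned, so no long-term AWS credentials ship with the app.
func assumeRoleWithWebIdentity(client *sts.STS, idToken string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	req := client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn: aws.String("arn:aws:iam::123456789012:role/web-identity-example"),
		// Prefer a GUID or pairwise identifier over PII, since the session name
		// appears in CloudTrail and in the assumed-role ARN.
		RoleSessionName:  aws.String("app-user-a1b2c3"),
		WebIdentityToken: aws.String(idToken),
	})
	return req.Send()
}
```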
Some AWS actions additionally return an encoded message -// that can provide details about this authorization failure. -// -// Only certain AWS actions return an encoded authorization message. The documentation -// for an individual action indicates whether that action returns an encoded -// message in addition to returning an HTTP code. -// -// The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the action -// should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. -// -// The decoded message includes the following type of information: -// -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested action. -// -// * The requested resource. -// -// * The values of condition keys in the context of the user's request. -// -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req := client.DecodeAuthorizationMessageRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) DecodeAuthorizationMessageRequest { - op := &aws.Operation{ - Name: opDecodeAuthorizationMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecodeAuthorizationMessageInput{} - } - - output := &DecodeAuthorizationMessageOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return DecodeAuthorizationMessageRequest{Request: req, Input: input, Copy: c.DecodeAuthorizationMessageRequest} -} - -const opGetCallerIdentity = "GetCallerIdentity" - -// GetCallerIdentityRequest is a API request type for the GetCallerIdentity API operation. -type GetCallerIdentityRequest struct { - *aws.Request - Input *GetCallerIdentityInput - Copy func(*GetCallerIdentityInput) GetCallerIdentityRequest -} - -// Send marshals and sends the GetCallerIdentity API request. -func (r GetCallerIdentityRequest) Send() (*GetCallerIdentityOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetCallerIdentityOutput), nil -} - -// GetCallerIdentityRequest returns a request value for making API operation for -// AWS Security Token Service. -// -// Returns details about the IAM identity whose credentials are used to call -// the API. -// -// // Example sending a request using the GetCallerIdentityRequest method. 
-// req := client.GetCallerIdentityRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) GetCallerIdentityRequest { - op := &aws.Operation{ - Name: opGetCallerIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCallerIdentityInput{} - } - - output := &GetCallerIdentityOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetCallerIdentityRequest{Request: req, Input: input, Copy: c.GetCallerIdentityRequest} -} - -const opGetFederationToken = "GetFederationToken" - -// GetFederationTokenRequest is a API request type for the GetFederationToken API operation. -type GetFederationTokenRequest struct { - *aws.Request - Input *GetFederationTokenInput - Copy func(*GetFederationTokenInput) GetFederationTokenRequest -} - -// Send marshals and sends the GetFederationToken API request. -func (r GetFederationTokenRequest) Send() (*GetFederationTokenOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetFederationTokenOutput), nil -} - -// GetFederationTokenRequest returns a request value for making API operation for -// AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. Because -// you must call the GetFederationToken action using the long-term security -// credentials of an IAM user, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other APIs that produce temporary -// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// If you are creating a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider, we recommend that you -// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. -// For more information, see Federation Through a Web-based Identity Provider -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// The GetFederationToken action must be called by using the long-term AWS security -// credentials of an IAM user. You can also call GetFederationToken using the -// security credentials of an AWS root account, but we do not recommended it. -// Instead, we recommend that you create an IAM user for the purpose of the -// proxy application and then attach a policy to the IAM user that limits federated -// users to only the actions and resources that they need access to. For more -// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. 
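The two operations documented above, DecodeAuthorizationMessage and GetCallerIdentity, are small enough to sketch together. The helper names are hypothetical; decoding requires the sts:DecodeAuthorizationMessage permission, and GetCallerIdentity simply reports on whichever credentials signed the request.

```go
package stsexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// decodeAuthorizationMessage decodes the encoded message that some AWS actions
// attach to a Client.UnauthorizedOperation (HTTP 403) response.
func decodeAuthorizationMessage(client *sts.STS, encoded string) (string, error) {
	req := client.DecodeAuthorizationMessageRequest(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	resp, err := req.Send()
	if err != nil {
		return "", err
	}
	return *resp.DecodedMessage, nil // XML document describing the denial
}

// whoAmI prints the account, ARN, and user ID behind the calling credentials.
func whoAmI(client *sts.STS) error {
	req := client.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
	resp, err := req.Send()
	if err != nil {
		return err
	}
	fmt.Printf("account=%s arn=%s userid=%s\n", *resp.Account, *resp.Arn, *resp.UserId)
	return nil
}
```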
-// -// The temporary security credentials that are obtained by using the long-term -// credentials of an IAM user are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximium of 129600 seconds (36 hours). The default -// is 43200 seconds (12 hours). Temporary credentials that are obtained by using -// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). -// -// The temporary security credentials created by GetFederationToken can be used -// to make API calls to any AWS service with the following exceptions: -// -// * You cannot use these credentials to call any IAM APIs. -// -// * You cannot call any STS APIs except GetCallerIdentity. -// -// Permissions -// -// The permissions for the temporary security credentials returned by GetFederationToken -// are determined by a combination of the following: -// -// * The policy or policies that are attached to the IAM user whose credentials -// are used to call GetFederationToken. -// -// * The policy that is passed as a parameter in the call. -// -// The passed policy is attached to the temporary security credentials that -// result from the GetFederationToken API call--that is, to the federated user. -// When the federated user makes an AWS request, AWS evaluates the policy attached -// to the federated user in combination with the policy or policies attached -// to the IAM user whose credentials were used to call GetFederationToken. AWS -// allows the federated user's request only when both the federated user and -// the IAM user are explicitly allowed to perform the requested action. The -// passed policy cannot grant more permissions than those that are defined in -// the IAM user policy. -// -// A typical use case is that the permissions of the IAM user whose credentials -// are used to call GetFederationToken are designed to allow access to all the -// actions and resources that any federated user will need. Then, for individual -// users, you pass a policy to the operation that scopes down the permissions -// to a level that's appropriate to that individual user, using a policy that -// allows only a subset of permissions that are granted to the IAM user. -// -// If you do not pass a policy, the resulting temporary security credentials -// have no effective permissions. The only exception is when the temporary security -// credentials are used to access a resource that has a resource-based policy -// that specifically allows the federated user to access the resource. -// -// For more information about how permissions work, see Permissions for GetFederationToken -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). -// For information about using GetFederationToken to create temporary security -// credentials, see GetFederationToken—Federation Through a Custom Identity -// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// // Example sending a request using the GetFederationTokenRequest method. 
-// req := client.GetFederationTokenRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) GetFederationTokenRequest { - op := &aws.Operation{ - Name: opGetFederationToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetFederationTokenInput{} - } - - output := &GetFederationTokenOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetFederationTokenRequest{Request: req, Input: input, Copy: c.GetFederationTokenRequest} -} - -const opGetSessionToken = "GetSessionToken" - -// GetSessionTokenRequest is a API request type for the GetSessionToken API operation. -type GetSessionTokenRequest struct { - *aws.Request - Input *GetSessionTokenInput - Copy func(*GetSessionTokenInput) GetSessionTokenRequest -} - -// Send marshals and sends the GetSessionToken API request. -func (r GetSessionTokenRequest) Send() (*GetSessionTokenOutput, error) { - err := r.Request.Send() - if err != nil { - return nil, err - } - - return r.Request.Data.(*GetSessionTokenOutput), nil -} - -// GetSessionTokenRequest returns a request value for making API operation for -// AWS Security Token Service. -// -// Returns a set of temporary credentials for an AWS account or IAM user. The -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled -// IAM users would need to call GetSessionToken and submit an MFA code that -// is associated with their MFA device. Using the temporary security credentials -// that are returned from the call, IAM users can then make programmatic calls -// to APIs that require MFA authentication. If you do not supply a correct MFA -// code, then the API returns an access denied error. For a comparison of GetSessionToken -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The GetSessionToken action must be called by using the long-term AWS security -// credentials of the AWS account or an IAM user. Credentials that are created -// by IAM users are valid for the duration that you specify, from 900 seconds -// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default -// of 43200 seconds (12 hours); credentials that are created by using account -// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 -// seconds (1 hour), with a default of 1 hour. -// -// The temporary security credentials created by GetSessionToken can be used -// to make API calls to any AWS service with the following exceptions: -// -// * You cannot call any IAM APIs unless MFA authentication information is -// included in the request. -// -// * You cannot call any STS API exceptAssumeRole or GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with root account credentials. 
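To make the GetFederationToken discussion above concrete, the sketch below passes a scoped-down policy for a single federated user. The policy content and bucket name are placeholders; the federated user's effective permissions are the intersection of this policy and the policies attached to the IAM user whose long-term credentials sign the call.

```go
package stsexample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// federateUser sketches a GetFederationToken call with a scoped-down policy.
func federateUser(client *sts.STS, userName string) (*sts.Credentials, error) {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::example-bucket/*"
	  }]
	}`
	req := client.GetFederationTokenRequest(&sts.GetFederationTokenInput{
		Name:            aws.String(userName),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(43200), // the documented 12-hour default
	})
	resp, err := req.Send()
	if err != nil {
		return nil, err
	}
	return resp.Credentials, nil
}
```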
-// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with AWS. -// -// The permissions associated with the temporary security credentials returned -// by GetSessionToken are based on the permissions associated with account or -// IAM user whose credentials are used to call the action. If GetSessionToken -// is called using root account credentials, the temporary credentials have -// root account permissions. Similarly, if GetSessionToken is called using the -// credentials of an IAM user, the temporary credentials have the same permissions -// as the IAM user. -// -// For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. -// -// // Example sending a request using the GetSessionTokenRequest method. -// req := client.GetSessionTokenRequest(params) -// resp, err := req.Send() -// if err == nil { -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) GetSessionTokenRequest { - op := &aws.Operation{ - Name: opGetSessionToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionTokenInput{} - } - - output := &GetSessionTokenOutput{} - req := c.newRequest(op, input, output) - output.responseMetadata = aws.Response{Request: req} - - return GetSessionTokenRequest{Request: req, Input: input, Copy: c.GetSessionTokenRequest} -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest -type AssumeRoleInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // A unique identifier that is used by third parties when assuming roles in - // their customers' accounts. 
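A matching sketch for GetSessionToken, as described above, from an MFA-enabled IAM user; the serial number is a placeholder and the field names follow the standard GetSessionToken parameters, which lie outside the portion of the file shown here.

```go
package stsexample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// sessionTokenWithMFA sketches a GetSessionToken call that submits an MFA code,
// so the resulting temporary credentials can reach MFA-protected APIs.
func sessionTokenWithMFA(client *sts.STS, tokenCode string) (*sts.Credentials, error) {
	req := client.GetSessionTokenRequest(&sts.GetSessionTokenInput{
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		TokenCode:       aws.String(tokenCode),
		DurationSeconds: aws.Int64(3600),
	})
	resp, err := req.Send()
	if err != nil {
		return nil, err
	}
	return resp.Credentials, nil
}
```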
For each role that the third party can assume, - // they should instruct their customers to ensure the role's trust policy checks - // for the external ID that the third party generated. Each time the third party - // assumes the role, they should pass the customer's external ID. The external - // ID is useful in order to help third parties bind a role to the customer who - // created it. For more information about the external ID, see How to Use an - // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. - // - // The regex used to validated this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - ExternalId *string `min:"2" type:"string"` - - // An IAM policy in JSON format. - // - // This parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both (the intersection of) the access policy of the role that - // is being assumed, and the policy that you pass. This gives you a way to further - // restrict the permissions for the resulting temporary security credentials. - // You cannot use the passed policy to grant permissions that are in excess - // of those allowed by the access policy of the role that is being assumed. - // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, - // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) - // in the IAM User Guide. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Name (ARN) of the role to assume. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role - // is assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the - // account that owns the role. The role session name is also used in the ARN - // of the assumed role principal. This means that subsequent cross-account API - // requests using the temporary security credentials will expose the role session - // name to the external account in their CloudTrail logs. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. 
You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s AssumeRoleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "AssumeRoleInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.ExternalId != nil && len(*s.ExternalId) < 2 { - invalidParams.Add(aws.NewErrParamMinLen("ExternalId", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Policy", 1)) - } - - if s.RoleArn == nil { - invalidParams.Add(aws.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(aws.NewErrParamMinLen("RoleArn", 20)) - } - - if s.RoleSessionName == nil { - invalidParams.Add(aws.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(aws.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(aws.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// Contains the response to a successful AssumeRole request, including temporary -// AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse -type AssumeRoleOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. 
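The generated Validate method above enforces the documented required fields and minimum lengths before any network call is made. A small sketch of invoking it client-side (the helper name and ARN are hypothetical):

```go
package stsexample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// validateBeforeSend shows the local parameter check; RoleSessionName is
// deliberately omitted so Validate reports it as a missing required field.
func validateBeforeSend() error {
	input := &sts.AssumeRoleInput{
		RoleArn: aws.String("arn:aws:iam::123456789012:role/example-role"),
	}
	if err := input.Validate(); err != nil {
		return err // an aws.ErrInvalidParams listing the invalid parameters
	}
	return nil
}
```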
- // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s AssumeRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s AssumeRoleOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest -type AssumeRoleWithSAMLInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. Your role session lasts for - // the duration that you specify for the DurationSeconds parameter, or until - // the time specified in the SAML authentication response's SessionNotOnOrAfter - // value, whichever is shorter. You can provide a DurationSeconds value from - // 900 seconds (15 minutes) up to the maximum session duration setting for the - // role. This setting can have a value from 1 hour to 12 hours. If you specify - // a value higher than this setting, the operation fails. For example, if you - // specify a session duration of 12 hours, but your administrator set the maximum - // session duration to 6 hours, your operation fails. To learn how to view the - // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format. - // - // The policy parameter is optional. 
If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. For more information, - // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) - // in the IAM User Guide. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes - // the IdP. - // - // PrincipalArn is a required field - PrincipalArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The base-64 encoded SAML authentication response provided by the IdP. - // - // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the Using IAM guide. - // - // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AssumeRoleWithSAMLInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Policy", 1)) - } - - if s.PrincipalArn == nil { - invalidParams.Add(aws.NewErrParamRequired("PrincipalArn")) - } - if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { - invalidParams.Add(aws.NewErrParamMinLen("PrincipalArn", 20)) - } - - if s.RoleArn == nil { - invalidParams.Add(aws.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(aws.NewErrParamMinLen("RoleArn", 20)) - } - - if s.SAMLAssertion == nil { - invalidParams.Add(aws.NewErrParamRequired("SAMLAssertion")) - } - if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { - invalidParams.Add(aws.NewErrParamMinLen("SAMLAssertion", 4)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse -type AssumeRoleWithSAMLOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The value of the Recipient attribute of the SubjectConfirmationData element - // of the SAML assertion. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // The value of the Issuer element of the SAML assertion. - Issuer *string `type:"string"` - - // A hash value based on the concatenation of the Issuer response value, the - // AWS account ID, and the friendly name (the last part of the ARN) of the SAML - // provider in IAM. The combination of NameQualifier and Subject can be used - // to uniquely identify a federated user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" - // ) ) - NameQualifier *string `type:"string"` - - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string `type:"string"` - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient - // or persistent. - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, - // that prefix is removed. 
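The NameQualifier doc comment above gives the pseudocode BASE64(SHA1("https://example.com/saml" + "123456789012" + "/MySAMLIdP")). A literal Go rendering of that pseudocode follows; the exact concatenation and encoding variant are assumptions taken from the pseudocode, not from the service implementation.

```go
package stsexample

import (
	"crypto/sha1"
	"encoding/base64"
)

// nameQualifier renders the documented pseudocode, e.g.
// nameQualifier("https://example.com/saml", "123456789012", "MySAMLIdP").
func nameQualifier(issuer, accountID, providerName string) string {
	sum := sha1.Sum([]byte(issuer + accountID + "/" + providerName))
	return base64.StdEncoding.EncodeToString(sum[:])
}
```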
For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient - // is returned as transient. If the format includes any other prefix, the format - // is returned with no modifications. - SubjectType *string `type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s AssumeRoleWithSAMLOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest -type AssumeRoleWithWebIdentityInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format. - // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. For more information, - // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) - // in the IAM User Guide. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. 
However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string `min:"1" type:"string"` - - // The fully qualified host component of the domain name of the identity provider. - // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string `min:"4" type:"string"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. Typically, you pass the name - // or identifier that is associated with the user who is using your application. - // That way, the temporary security credentials that your application will use - // are associated with that user. This session name is included as part of the - // ARN and assumed role ID in the AssumedRoleUser response element. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by - // the identity provider. Your application must get this token by authenticating - // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. - // - // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AssumeRoleWithWebIdentityInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Policy", 1)) - } - if s.ProviderId != nil && len(*s.ProviderId) < 4 { - invalidParams.Add(aws.NewErrParamMinLen("ProviderId", 4)) - } - - if s.RoleArn == nil { - invalidParams.Add(aws.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(aws.NewErrParamMinLen("RoleArn", 20)) - } - - if s.RoleSessionName == nil { - invalidParams.Add(aws.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(aws.NewErrParamMinLen("RoleSessionName", 2)) - } - - if s.WebIdentityToken == nil { - invalidParams.Add(aws.NewErrParamRequired("WebIdentityToken")) - } - if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { - invalidParams.Add(aws.NewErrParamMinLen("WebIdentityToken", 4)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse -type AssumeRoleWithWebIdentityOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The intended audience (also known as client ID) of the web identity token. - // This is traditionally the client identifier issued to the application that - // requested the web identity token. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID Tokens this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed - // in the AssumeRoleWithWebIdentity request. - Provider *string `type:"string"` - - // The unique user identifier that is returned by the identity provider. 
This - // identifier is associated with the WebIdentityToken that was submitted with - // the AssumeRoleWithWebIdentity call. The identifier is typically unique to - // the user and the application that acquired the WebIdentityToken (pairwise - // identifier). For OpenID Connect ID tokens, this field contains the value - // returned by the identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s AssumeRoleWithWebIdentityOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// The identifiers for the temporary security credentials that the operation -// returns. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser -type AssumedRoleUser struct { - _ struct{} `type:"structure"` - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by AWS when the - // role is created. - // - // AssumedRoleId is a required field - AssumedRoleId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumedRoleUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumedRoleUser) GoString() string { - return s.String() -} - -// AWS credentials for API authentication. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials -type Credentials struct { - _ struct{} `type:"structure"` - - // The access key ID that identifies the temporary security credentials. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` - - // The date on which the current credentials expire. - // - // Expiration is a required field - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` - - // The secret access key that can be used to sign requests. - // - // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` - - // The token that users must pass to the service API to use the temporary credentials. - // - // SessionToken is a required field - SessionToken *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Credentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Credentials) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest -type DecodeAuthorizationMessageInput struct { - _ struct{} `type:"structure"` - - // The encoded message that was returned with the response. 
- // - // EncodedMessage is a required field - EncodedMessage *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecodeAuthorizationMessageInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} - - if s.EncodedMessage == nil { - invalidParams.Add(aws.NewErrParamRequired("EncodedMessage")) - } - if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("EncodedMessage", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an AWS -// request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse -type DecodeAuthorizationMessageOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // An XML document that contains the decoded message. - DecodedMessage *string `type:"string"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s DecodeAuthorizationMessageOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// Identifiers for the federated user that is associated with the credentials. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser -type FederatedUser struct { - _ struct{} `type:"structure"` - - // The ARN that specifies the federated user that is associated with the credentials. - // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // FederatedUserId is a required field - FederatedUserId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s FederatedUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FederatedUser) GoString() string { - return s.String() -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest -type GetCallerIdentityInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetCallerIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityInput) GoString() string { - return s.String() -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. 
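The request shapes in this deleted api.go all follow the same pattern: pointer fields plus a generated Validate method that enforces the required/min tags before a request is ever signed. A minimal sketch against the DecodeAuthorizationMessageInput type shown above; the message string is a placeholder, since a real encoded message is only returned alongside an authorization failure:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// A zero value fails validation: EncodedMessage is required and has min length 1.
	var empty sts.DecodeAuthorizationMessageInput
	fmt.Println(empty.Validate())

	// Placeholder message; the real value comes from an access-denied error response.
	msg := "encoded-authorization-message"
	in := sts.DecodeAuthorizationMessageInput{EncodedMessage: &msg}
	fmt.Println(in.Validate()) // <nil>
}
```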
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse -type GetCallerIdentityOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The AWS account ID number of the account that owns or contains the calling - // entity. - Account *string `type:"string"` - - // The AWS ARN associated with the calling entity. - Arn *string `min:"20" type:"string"` - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity making the call. The values returned are those listed in the - // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string `type:"string"` -} - -// String returns the string representation -func (s GetCallerIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetCallerIdentityOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest -type GetFederationTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds - // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained - // using AWS account (root) credentials are restricted to a maximum of 3600 - // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using AWS account (root) credentials defaults to one - // hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon - // S3 bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // Name is a required field - Name *string `min:"2" type:"string" required:"true"` - - // An IAM policy in JSON format that is passed with the GetFederationToken call - // and evaluated along with the policy or policies that are attached to the - // IAM user whose credentials are used to call GetFederationToken. The passed - // policy is used to scope down the permissions that are available to the IAM - // user, by allowing only a subset of the permissions that are granted to the - // IAM user. The passed policy cannot grant more permissions than those granted - // to the IAM user. The final permissions for the federated user are the most - // restrictive set based on the intersection of the passed policy and the IAM - // user policy. - // - // If you do not pass a policy, the resulting temporary security credentials - // have no effective permissions. 
The only exception is when the temporary security - // credentials are used to access a resource that has a resource-based policy - // that specifically allows the federated user to access the resource. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - // - // For more information about how permissions work, see Permissions for GetFederationToken - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). - Policy *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s GetFederationTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFederationTokenInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetFederationTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900)) - } - - if s.Name == nil { - invalidParams.Add(aws.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(aws.NewErrParamMinLen("Name", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(aws.NewErrParamMinLen("Policy", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// Contains the response to a successful GetFederationToken request, including -// temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse -type GetFederationTokenOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` - - // Identifiers for the federated user associated with the credentials (such - // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You - // can use the federated user's ARN in your resource-based policies, such as - // an Amazon S3 bucket policy. - FederatedUser *FederatedUser `type:"structure"` - - // A percentage value indicating the size of the policy in packed form. The - // service rejects policies for which the packed size is greater than 100 percent - // of the allowed value. 
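GetFederationTokenInput above carries the same client-side constraints (Name at least 2 characters, DurationSeconds at least 900, Policy at least 1 character). A short sketch of building and validating such an input; the name, duration, and policy document are illustrative values only:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	name := "Bob"           // federated user name, minimum 2 characters
	duration := int64(3600) // one hour; the minimum allowed is 900 seconds
	// Illustrative scope-down policy; an empty statement list grants nothing extra.
	policy := `{"Version":"2012-10-17","Statement":[]}`

	in := sts.GetFederationTokenInput{
		Name:            &name,
		DurationSeconds: &duration,
		Policy:          &policy,
	}
	if err := in.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println("input passes client-side validation")
}
```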
- PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s GetFederationTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetFederationTokenOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest -type GetSessionTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 - // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). - // If the duration is longer than one hour, the session for AWS account owners - // defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM - // user has a policy that requires MFA authentication. The value is either the - // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource - // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the AWS Management Console - // and viewing the user's security credentials. - // - // The regex used to validated this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, and the user does not provide a code when requesting a set of - // temporary security credentials, the user will receive an "access denied" - // response when requesting resources that require MFA authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s GetSessionTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
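For GetSessionTokenInput the interesting fields are the MFA ones; the min tags shown above (SerialNumber at least 9 characters, TokenCode at least 6) are exactly what the generated Validate method that follows checks. A small sketch with placeholder MFA values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	serial := "arn:aws:iam::123456789012:mfa/user" // placeholder virtual MFA device ARN
	code := "123456"                               // six digits from the MFA device
	duration := int64(900)                         // the minimum session length

	in := sts.GetSessionTokenInput{
		SerialNumber:    &serial,
		TokenCode:       &code,
		DurationSeconds: &duration,
	}
	fmt.Println(in.Validate()) // <nil>: all min-length and min-value constraints are met
}
```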
-func (s *GetSessionTokenInput) Validate() error { - invalidParams := aws.ErrInvalidParams{Context: "GetSessionTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(aws.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// Contains the response to a successful GetSessionToken request, including -// temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse -type GetSessionTokenOutput struct { - _ struct{} `type:"structure"` - - responseMetadata aws.Response - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials *Credentials `type:"structure"` -} - -// String returns the string representation -func (s GetSessionTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenOutput) GoString() string { - return s.String() -} - -// SDKResponseMetdata return sthe response metadata for the API. -func (s GetSessionTokenOutput) SDKResponseMetadata() aws.Response { - return s.responseMetadata -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/customizations.go deleted file mode 100644 index 488324c3..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/customizations.go +++ /dev/null @@ -1,12 +0,0 @@ -package sts - -import request "github.com/aws/aws-sdk-go-v2/aws" - -func init() { - initRequest = func(c *STS, r *request.Request) { - switch r.Operation.Name { - case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: - r.Handlers.Sign.Clear() // these operations are unsigned - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go deleted file mode 100644 index a43fa805..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sts provides the client and types for making API -// requests to AWS Security Token Service. -// -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). 
The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com -// that maps to the US East (N. Virginia) region. Additional regions are available -// and are activated by default. For more information, see Activating and Deactivating -// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. To learn more about CloudTrail, including how to turn it on and find -// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. -// -// See sts package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ -// -// Using the Client -// -// To AWS Security Token Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Security Token Service client STS for more -// information on creating client for this service. 
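The deleted doc.go above describes creating the client with New and notes that clients are safe for concurrent use; the constructor itself, New(config aws.Config) *STS, appears in service.go later in this diff. A minimal sketch of wiring it up, taking the aws.Config as given because configuration loading is outside these files:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// newSTSClient wraps the generated constructor from the deleted service.go.
// How cfg is populated (shared config files, environment, etc.) is not part
// of this diff, so it is simply taken as a parameter here.
func newSTSClient(cfg aws.Config) *sts.STS {
	// The returned client is safe to use concurrently, per the package docs.
	return sts.New(cfg)
}
```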
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/errors.go deleted file mode 100644 index e24884ef..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/errors.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -const ( - - // ErrCodeExpiredTokenException for service response error code - // "ExpiredTokenException". - // - // The web identity token that was passed is expired or is not valid. Get a - // new identity token from the identity provider and then retry the request. - ErrCodeExpiredTokenException = "ExpiredTokenException" - - // ErrCodeIDPCommunicationErrorException for service response error code - // "IDPCommunicationError". - // - // The request could not be fulfilled because the non-AWS identity provider - // (IDP) that was asked to verify the incoming identity token could not be reached. - // This is often a transient error caused by network conditions. Retry the request - // a limited number of times so that you don't exceed the request rate. If the - // error persists, the non-AWS identity provider might be down or not responding. - ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" - - // ErrCodeIDPRejectedClaimException for service response error code - // "IDPRejectedClaim". - // - // The identity provider (IdP) reported that authentication failed. This might - // be because the claim is invalid. - // - // If this error is returned for the AssumeRoleWithWebIdentity operation, it - // can also mean that the claim has expired or has been explicitly revoked. - ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" - - // ErrCodeInvalidAuthorizationMessageException for service response error code - // "InvalidAuthorizationMessageException". - // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. - ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" - - // ErrCodeInvalidIdentityTokenException for service response error code - // "InvalidIdentityToken". - // - // The web identity token that was passed could not be validated by AWS. Get - // a new identity token from the identity provider and then retry the request. - ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocument". - // - // The request was rejected because the policy document was malformed. The error - // message describes the specific error. - ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" - - // ErrCodePackedPolicyTooLargeException for service response error code - // "PackedPolicyTooLarge". - // - // The request was rejected because the policy document was too large. The error - // message describes how big the policy document is, in packed form, as a percentage - // of what the API allows. - ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" - - // ErrCodeRegionDisabledException for service response error code - // "RegionDisabledException". - // - // STS is not activated in the requested region for the account that is being - // asked to generate credentials. The account administrator must use the IAM - // console to activate STS in that region. 
For more information, see Activating - // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodeRegionDisabledException = "RegionDisabledException" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/service.go deleted file mode 100644 index 79775f71..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/service.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/private/protocol/query" -) - -// STS provides the API operation methods for making requests to -// AWS Security Token Service. See this package's package overview docs -// for details on the service. -// -// STS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type STS struct { - *aws.Client -} - -// Used for custom client initialization logic -var initClient func(*STS) - -// Used for custom request initialization logic -var initRequest func(*STS, *aws.Request) - -// Service information constants -const ( - ServiceName = "sts" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. -) - -// New creates a new instance of the STS client with a config. -// -// Example: -// // Create a STS client from just a config. -// svc := sts.New(myConfig) -func New(config aws.Config) *STS { - var signingName string - signingRegion := config.Region - - svc := &STS{ - Client: aws.NewClient( - config, - aws.Metadata{ - ServiceName: ServiceName, - SigningName: signingName, - SigningRegion: signingRegion, - APIVersion: "2011-06-15", - }, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc) - } - - return svc -} - -// newRequest creates a new request for a STS operation and runs any -// custom request initialization. -func (c *STS) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(c, req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 899129ec..00000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go deleted file mode 100644 index 1c496742..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go +++ /dev/null @@ -1,93 +0,0 @@ -// Package arn provides a parser for interacting with Amazon Resource Names. -package arn - -import ( - "errors" - "strings" -) - -const ( - arnDelimiter = ":" - arnSections = 6 - arnPrefix = "arn:" - - // zero-indexed - sectionPartition = 1 - sectionService = 2 - sectionRegion = 3 - sectionAccountID = 4 - sectionResource = 5 - - // errors - invalidPrefix = "arn: invalid prefix" - invalidSections = "arn: not enough sections" -) - -// ARN captures the individual fields of an Amazon Resource Name. -// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. -type ARN struct { - // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in - // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China - // (Beijing) region is "aws-cn". - Partition string - - // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of - // namespaces, see - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. - Service string - - // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this - // component might be omitted. - Region string - - // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the - // ARNs for some resources don't require an account number, so this component might be omitted. - AccountID string - - // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — - // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the - // resource name itself. Some services allows paths for resource names, as described in - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. - Resource string -} - -// Parse parses an ARN into its constituent parts. 
-// -// Some example ARNs: -// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment -// arn:aws:iam::123456789012:user/David -// arn:aws:rds:eu-west-1:123456789012:db:mysql-db -// arn:aws:s3:::my_corporate_bucket/exampleobject.png -func Parse(arn string) (ARN, error) { - if !strings.HasPrefix(arn, arnPrefix) { - return ARN{}, errors.New(invalidPrefix) - } - sections := strings.SplitN(arn, arnDelimiter, arnSections) - if len(sections) != arnSections { - return ARN{}, errors.New(invalidSections) - } - return ARN{ - Partition: sections[sectionPartition], - Service: sections[sectionService], - Region: sections[sectionRegion], - AccountID: sections[sectionAccountID], - Resource: sections[sectionResource], - }, nil -} - -// IsARN returns whether the given string is an ARN by looking for -// whether the string starts with "arn:" and contains the correct number -// of sections delimited by colons(:). -func IsARN(arn string) bool { - return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 -} - -// String returns the canonical representation of the ARN -func (arn ARN) String() string { - return arnPrefix + - arn.Partition + arnDelimiter + - arn.Service + arnDelimiter + - arn.Region + arnDelimiter + - arn.AccountID + arnDelimiter + - arn.Resource -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 99849c0e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. -// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. 
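Stepping back to the arn package deleted just above: Parse, IsARN, and String are its whole surface, and the doc comment's example ARNs are enough to exercise them. A quick sketch using one of those documented examples:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	raw := "arn:aws:iam::123456789012:user/David" // example ARN from the package docs

	if !arn.IsARN(raw) {
		fmt.Println("not an ARN")
		return
	}
	a, err := arn.Parse(raw)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(a.Partition, a.Service, a.AccountID, a.Resource) // aws iam 123456789012 user/David
	fmt.Println(a.String() == raw)                               // true: String rebuilds the canonical form
}
```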
- OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} - -// UnmarshalError provides the interface for the SDK failing to unmarshal data. -type UnmarshalError interface { - awsError - Bytes() []byte -} - -// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding -// the bytes that fail to unmarshal to the error. 
-func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { - return &unmarshalError{ - awsError: New("UnmarshalError", msg, err), - bytes: bytes, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 9cf7eaf4..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,221 +0,0 @@ -package awserr - -import ( - "encoding/hex" - "fmt" -) - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. 
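The constructors shown above, New and NewRequestFailure from the deleted awserr package, compose: a classified error can be wrapped again with the HTTP status and request ID of a failed call. A brief sketch with made-up code, status, and request ID values:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Classify a low-level failure with a code and message (values are illustrative).
	base := awserr.New("SerializationError", "failed to decode response body", errors.New("unexpected EOF"))
	fmt.Println(base.Code(), "-", base.Message())
	fmt.Println("caused by:", base.OrigErr())

	// Add the HTTP status and request ID the way the SDK does for failed service calls.
	reqErr := awserr.NewRequestFailure(base, 500, "example-request-id")
	fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
}
```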
-func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string - bytes []byte -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -type unmarshalError struct { - awsError - bytes []byte -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (e unmarshalError) Error() string { - extra := hex.Dump(e.bytes) - return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (e unmarshalError) String() string { - return e.Error() -} - -// Bytes returns the bytes that failed to unmarshal. -func (e unmarshalError) Bytes() []byte { - return e.bytes -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += e[i].Error() - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 1a3d106d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,108 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. 
Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. -func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - dst.Set(reflect.New(e)) - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 142a7a01..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. 
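Copy and CopyOf above drive the SDK's deep copying of request and response shapes; DeepEqual, whose implementation follows, compares through pointers. A small sketch with a stand-in struct (the shape type here is hypothetical, not part of the SDK):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// shape is a hypothetical stand-in for an SDK request or response struct:
// exported pointer and slice fields, which is what Copy knows how to handle.
type shape struct {
	Name *string
	Tags []string
}

func main() {
	name := "example"
	src := &shape{Name: &name, Tags: []string{"a", "b"}}

	var dst shape
	awsutil.Copy(&dst, src) // deep copy: dst gets its own pointer target and slice

	fmt.Println(awsutil.DeepEqual(&dst, src)) // true: equal once pointers are dereferenced
	fmt.Println(&dst.Tags[0] != &src.Tags[0]) // true: the slices no longer share a backing array
}
```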
-func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type they are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index a4eb6a7f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,221 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. -func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. 
- value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. -func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - rvals := rValuesAtPath(i, path, true, false, v == nil) - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index 710eb432..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,113 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. 
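SetValueAtPath, deleted above, resolves a case-insensitive dotted path and allocates nil intermediate pointers before assigning the leaf value. A small sketch with hypothetical Outer and Inner types:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// Inner and Outer are hypothetical types used only to illustrate the path helpers.
type Inner struct {
	Name *string
}

type Outer struct {
	Inner *Inner
}

func main() {
	var o Outer

	// The path is matched case-insensitively; the nil o.Inner pointer is
	// allocated on the way down because SetValueAtPath walks with createPath.
	awsutil.SetValueAtPath(&o, "Inner.Name", "hello")

	fmt.Println(*o.Inner.Name) // hello
}
```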
-func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. -func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index 645df245..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,88 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. 
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue // ignore unexported fields - } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { - continue // ignore unset fields - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("") - } else { - stringValue(fv, indent+2, buf) - } - - buf.WriteString(",\n") - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index 03334d69..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - PartitionID string - Endpoint string - SigningRegion string - SigningName string - - // States that the signing name did not come from a modeled source but - // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overridden based on metadata the - // service has. - SigningNameDerived bool -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not -// resolve the endpoint automatically. The service client's endpoint must be -// provided via the aws.Config.Endpoint field. -type ConfigNoResolveEndpointProvider interface { - ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers.Copy(), - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = DefaultRetryerMaxNumRetries - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(aws.LogDebug) { - return - } - - c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) - c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 9f6af19d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,177 +0,0 @@ -package client - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, you can implement the -// request.Retryer interface. -// -type DefaultRetryer struct { - // Num max Retries is the number of max retries that will be performed. - // By default, this is zero. - NumMaxRetries int - - // MinRetryDelay is the minimum retry delay after which retry will be performed. - // If not set, the value is 0ns. - MinRetryDelay time.Duration - - // MinThrottleRetryDelay is the minimum retry delay when throttled. - // If not set, the value is 0ns. - MinThrottleDelay time.Duration - - // MaxRetryDelay is the maximum retry delay before which retry must be performed. - // If not set, the value is 0ns. - MaxRetryDelay time.Duration - - // MaxThrottleDelay is the maximum retry delay when throttled. - // If not set, the value is 0ns. - MaxThrottleDelay time.Duration -} - -const ( - // DefaultRetryerMaxNumRetries sets maximum number of retries - DefaultRetryerMaxNumRetries = 3 - - // DefaultRetryerMinRetryDelay sets minimum retry delay - DefaultRetryerMinRetryDelay = 30 * time.Millisecond - - // DefaultRetryerMinThrottleDelay sets minimum delay when throttled - DefaultRetryerMinThrottleDelay = 500 * time.Millisecond - - // DefaultRetryerMaxRetryDelay sets maximum retry delay - DefaultRetryerMaxRetryDelay = 300 * time.Second - - // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled - DefaultRetryerMaxThrottleDelay = 300 * time.Second -) - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. 
-func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -// setRetryerDefaults sets the default values of the retryer if not set -func (d *DefaultRetryer) setRetryerDefaults() { - if d.MinRetryDelay == 0 { - d.MinRetryDelay = DefaultRetryerMinRetryDelay - } - if d.MaxRetryDelay == 0 { - d.MaxRetryDelay = DefaultRetryerMaxRetryDelay - } - if d.MinThrottleDelay == 0 { - d.MinThrottleDelay = DefaultRetryerMinThrottleDelay - } - if d.MaxThrottleDelay == 0 { - d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay - } -} - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - - // if number of max retries is zero, no retries will be performed. - if d.NumMaxRetries == 0 { - return 0 - } - - // Sets default value for retryer members - d.setRetryerDefaults() - - // minDelay is the minimum retryer delay - minDelay := d.MinRetryDelay - - var initialDelay time.Duration - - isThrottle := r.IsErrorThrottle() - if isThrottle { - if delay, ok := getRetryAfterDelay(r); ok { - initialDelay = delay - } - minDelay = d.MinThrottleDelay - } - - retryCount := r.RetryCount - - // maxDelay the maximum retryer delay - maxDelay := d.MaxRetryDelay - - if isThrottle { - maxDelay = d.MaxThrottleDelay - } - - var delay time.Duration - - // Logic to cap the retry count based on the minDelay provided - actualRetryCount := int(math.Log2(float64(minDelay))) + 1 - if actualRetryCount < 63-retryCount { - delay = time.Duration(1< maxDelay { - delay = getJitterDelay(maxDelay / 2) - } - } else { - delay = getJitterDelay(maxDelay / 2) - } - return delay + initialDelay -} - -// getJitterDelay returns a jittered delay for retry -func getJitterDelay(duration time.Duration) time.Duration { - return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - - // ShouldRetry returns false if number of max retries is 0. - if d.NumMaxRetries == 0 { - return false - } - - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - return r.IsErrorRetryable() || r.IsErrorThrottle() -} - -// This will look in the Retry-After header, RFC 7231, for how long -// it will wait before attempting another request -func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { - if !canUseRetryAfterHeader(r) { - return 0, false - } - - delayStr := r.HTTPResponse.Header.Get("Retry-After") - if len(delayStr) == 0 { - return 0, false - } - - delay, err := strconv.Atoi(delayStr) - if err != nil { - return 0, false - } - - return time.Duration(delay) * time.Second, true -} - -// Will look at the status code to see if the retry header pertains to -// the status code. 
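The RetryRules body above computes a jittered exponential backoff capped at the maximum delay. The sketch below mirrors that computation in a standalone function; it is a reconstruction with a local jitter helper standing in for the SDK's internal sdkrand, not the vendored code verbatim:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// jitter returns a delay in [d, 2d), mirroring getJitterDelay above.
func jitter(d time.Duration) time.Duration {
	return time.Duration(rand.Int63n(int64(d)) + int64(d))
}

// backoff mirrors the core of DefaultRetryer.RetryRules: the minimum delay
// grows exponentially with the retry count, is jittered, and is capped.
func backoff(retryCount int, minDelay, maxDelay time.Duration) time.Duration {
	// Cap the shift so 1<<retryCount cannot overflow int64.
	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
	if actualRetryCount < 63-retryCount {
		delay := time.Duration(1<<uint64(retryCount)) * jitter(minDelay)
		if delay > maxDelay {
			delay = jitter(maxDelay / 2)
		}
		return delay
	}
	return jitter(maxDelay / 2)
}

func main() {
	for retry := 0; retry < 4; retry++ {
		fmt.Println(retry, backoff(retry, 30*time.Millisecond, 300*time.Second))
	}
}
```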
-func canUseRetryAfterHeader(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 503: - default: - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go deleted file mode 100644 index 8958c32d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ /dev/null @@ -1,194 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. - Logger aws.Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent -// to a service. Will include the HTTP request body if the LogLevel of the -// request matches LogDebugWithHTTPBody. -var LogHTTPRequestHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequest", - Fn: logRequest, -} - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - bodySeekable := aws.IsReaderSeekable(r.Body) - - b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - if !bodySeekable { - r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) - } - // Reset the request body because dumpRequest will re-wrap the - // r.HTTPRequest's Body as a NoOpCloser and will not be reset after - // read by the HTTP client reader. - if err := r.Error; err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent -// to a service. Will only log the HTTP request's headers. The request payload -// will not be read. 
-var LogHTTPRequestHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequestHeader", - Fn: logRequestHeader, -} - -func logRequestHeader(r *request.Request) { - b, err := httputil.DumpRequestOut(r.HTTPRequest, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -// LogHTTPResponseHandler is a SDK request handler to log the HTTP response -// received from a service. Will include the HTTP response body if the LogLevel -// of the request matches LogDebugWithHTTPBody. -var LogHTTPResponseHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponse", - Fn: logResponse, -} - -func logResponse(r *request.Request) { - lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - - if r.HTTPResponse == nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) - return - } - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - if logBody { - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, - } - } - - handlerFn := func(req *request.Request) { - b, err := httputil.DumpResponse(req.HTTPResponse, false) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(fmt.Sprintf(logRespMsg, - req.ClientInfo.ServiceName, req.Operation.Name, string(b))) - - if logBody { - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(string(b)) - } - } - - const handlerName = "awsdk.client.LogResponse.ResponseBody" - - r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) - r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) -} - -// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP -// response received from a service. Will only log the HTTP response's headers. -// The response payload will not be read. 
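LogHTTPRequestHandler and LogHTTPResponseHandler above only dump message bodies when the configured log level matches LogDebugWithHTTPBody. A minimal sketch of enabling that through the session config (the region value is an arbitrary example):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// LogDebugWithHTTPBody makes the debug handlers include the request
	// and response bodies in their output.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-east-1"),
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
	}))

	_ = sess // pass sess to a service client, e.g. s3.New(sess)
}
```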
-var LogHTTPResponseHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponseHeader", - Fn: logResponseHeader, -} - -func logResponseHeader(r *request.Request) { - if r.Config.Logger == nil { - return - } - - b, err := httputil.DumpResponse(r.HTTPResponse, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index 0c48f72e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,14 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. -type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - PartitionID string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go deleted file mode 100644 index 881d575f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// NoOpRetryer provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type NoOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d NoOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index 2def23fa..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,586 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// UseServiceDefaultRetries instructs the config to use the service's own -// default number of retries. This will be the default action if -// Config.MaxRetries is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer -// interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig structure. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) -// -// // Create S3 service client with a specific Region. 
-// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) -type Config struct { - // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to - // retrieve credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. - // - // Note: You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. - EndpointResolver endpoints.Resolver - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not if request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - EnforceShouldRetryCheck *bool - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS - // Regions and Endpoints. - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service - // specific configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the client.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for - // missing required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. 
- DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client - // will use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // Note: This configuration option is specific to the Amazon S3 service. - // - // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // for Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. - // - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // - // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait - // timeout. https://golang.org/pkg/net/http/#Transport - // - // You should use this flag to disble 100-Continue if you experience issues - // with proxies or third party S3 compatible services. - S3Disable100Continue *bool - - // Set this to `true` to enable S3 Accelerate feature. For all operations - // compatible with S3 Accelerate will use the accelerate endpoint for - // requests. Requests not compatible will fall back to normal S3 requests. - // - // The bucket must be enable for accelerate to be used with S3 client with - // accelerate enabled. If the bucket is not enabled for accelerate an error - // will be returned. The bucket name must be DNS compatible to also work - // with accelerate. - S3UseAccelerate *bool - - // S3DisableContentMD5Validation config option is temporarily disabled, - // For S3 GetObject API calls, #1837. - // - // Set this to `true` to disable the S3 service client from automatically - // adding the ContentMD5 to S3 Object Put and Upload API calls. This option - // will also disable the SDK from performing object ContentMD5 validation - // on GetObject API calls. - S3DisableContentMD5Validation *bool - - // Set this to `true` to have the S3 service client to use the region specified - // in the ARN, when an ARN is provided as an argument to a bucket parameter. - S3UseARNRegion *bool - - // Set this to `true` to enable the SDK to unmarshal API response header maps to - // normalized lower case map keys. - // - // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case - // Metadata member's map keys. The value of the header in the map is unaffected. - LowerCaseHeaderMaps *bool - - // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the - // EC2Metadata client to create a new http.Client. This options is only - // meaningful if you're not already using a custom HTTP client with the - // SDK. Enabled by default. - // - // Must be set and provided to the session.NewSession() in order to disable - // the EC2Metadata overriding the timeout for default credentials chain. 
- // - // Example: - // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) - // - // svc := s3.New(sess) - // - EC2MetadataDisableTimeoutOverride *bool - - // Instructs the endpoint to be generated for a service client to - // be the dual stack endpoint. The dual stack endpoint will support - // both IPv4 and IPv6 addressing. - // - // Setting this for a service which does not support dual stack will fail - // to make requets. It is not recommended to set this value on the session - // as it will apply to all service clients created with the session. Even - // services which don't support dual stack endpoints. - // - // If the Endpoint config value is also provided the UseDualStack flag - // will be ignored. - // - // Only supported with. - // - // sess := session.Must(session.NewSession()) - // - // svc := s3.New(sess, &aws.Config{ - // UseDualStack: aws.Bool(true), - // }) - UseDualStack *bool - - // SleepDelay is an override for the func the SDK will call when sleeping - // during the lifecycle of a request. Specifically this will be used for - // request delays. This value should only be used for testing. To adjust - // the delay of a request see the aws/client.DefaultRetryer and - // aws/request.Retryer. - // - // SleepDelay will prevent any Context from being used for canceling retry - // delay of an API operation. It is recommended to not use SleepDelay at all - // and specify a Retryer instead. - SleepDelay func(time.Duration) - - // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. - // Will default to false. This would only be used for empty directory names in s3 requests. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // DisableRestProtocolURICleaning: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - DisableRestProtocolURICleaning *bool - - // EnableEndpointDiscovery will allow for endpoint discovery on operations that - // have the definition in its model. By default, endpoint discovery is off. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // EnableEndpointDiscovery: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("/foo/bar/moo"), - // }) - EnableEndpointDiscovery *bool - - // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing - // request endpoint hosts with modeled information. - // - // Disabling this feature is useful when you want to use local endpoints - // for testing that do not support the modeled host prefix pattern. - DisableEndpointHostPrefix *bool - - // STSRegionalEndpoint will enable regional or legacy endpoint resolving - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). 
-// WithMaxRetries(3), -// )) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithEndpointResolver sets a config EndpointResolver value returning a -// Config pointer for chaining. -func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { - c.EndpointResolver = resolver - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c - -} - -// WithS3DisableContentMD5Validation sets a config -// S3DisableContentMD5Validation value returning a Config pointer for chaining. 
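NewConfig and the With* setters above allow a Config to be built fluently without taking addresses of literals. A short sketch (the region and retry values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Equivalent to &aws.Config{Region: aws.String("us-west-2"),
	// MaxRetries: aws.Int(3), DisableSSL: aws.Bool(false)}.
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(3).
		WithDisableSSL(false)

	fmt.Println(*cfg.Region, *cfg.MaxRetries, *cfg.DisableSSL)
}
```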
-func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { - c.S3DisableContentMD5Validation = &enable - return c - -} - -// WithS3UseARNRegion sets a config S3UseARNRegion value and -// returning a Config pointer for chaining -func (c *Config) WithS3UseARNRegion(enable bool) *Config { - c.S3UseARNRegion = &enable - return c -} - -// WithUseDualStack sets a config UseDualStack value returning a Config -// pointer for chaining. -func (c *Config) WithUseDualStack(enable bool) *Config { - c.UseDualStack = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// WithEndpointDiscovery will set whether or not to use endpoint discovery. -func (c *Config) WithEndpointDiscovery(t bool) *Config { - c.EnableEndpointDiscovery = &t - return c -} - -// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix -// when making requests. -func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { - c.DisableEndpointHostPrefix = &t - return c -} - -// MergeIn merges the passed in configs into the existing config object. -func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { - c.STSRegionalEndpoint = sre - return c -} - -// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { - c.S3UsEast1RegionalEndpoint = sre - return c -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.EndpointResolver != nil { - dst.EndpointResolver = other.EndpointResolver - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if 
other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.S3DisableContentMD5Validation != nil { - dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation - } - - if other.S3UseARNRegion != nil { - dst.S3UseARNRegion = other.S3UseARNRegion - } - - if other.UseDualStack != nil { - dst.UseDualStack = other.UseDualStack - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } - - if other.DisableRestProtocolURICleaning != nil { - dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning - } - - if other.EnforceShouldRetryCheck != nil { - dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck - } - - if other.EnableEndpointDiscovery != nil { - dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery - } - - if other.DisableEndpointHostPrefix != nil { - dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix - } - - if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { - dst.STSRegionalEndpoint = other.STSRegionalEndpoint - } - - if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { - dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. -func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go deleted file mode 100644 index 2866f9a7..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !go1.9 - -package aws - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. 
- Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go deleted file mode 100644 index 3718b26e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.9 - -package aws - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go deleted file mode 100644 index 66c5945d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build !go1.7 - -package aws - -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) -) - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return backgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go deleted file mode 100644 index 9c29f29a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. 
-func BackgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go deleted file mode 100644 index 304fd156..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go +++ /dev/null @@ -1,24 +0,0 @@ -package aws - -import ( - "time" -) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index 4e076c18..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,918 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. 
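SleepWithContext above returns the context's error as soon as the context is canceled or times out, instead of waiting out the full duration. A small sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The requested sleep is longer than the context deadline, so the
	// context wins and its error is returned.
	if err := aws.SleepWithContext(ctx, 2*time.Second); err != nil {
		fmt.Println("sleep interrupted:", err) // context deadline exceeded
	}
}
```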
-func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pointer to the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. 
-func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values uinto a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers uinto a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values uinto a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers uinto a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int8 returns a pointer to the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Value returns the value of the int8 pointer passed in or -// 0 if the pointer is nil. -func Int8Value(v *int8) int8 { - if v != nil { - return *v - } - return 0 -} - -// Int8Slice converts a slice of int8 values into a slice of -// int8 pointers -func Int8Slice(src []int8) []*int8 { - dst := make([]*int8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int8ValueSlice converts a slice of int8 pointers into a slice of -// int8 values -func Int8ValueSlice(src []*int8) []int8 { - dst := make([]int8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int8Map converts a string map of int8 values into a string -// map of int8 pointers -func Int8Map(src map[string]int8) map[string]*int8 { - dst := make(map[string]*int8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int8ValueMap converts a string map of int8 pointers into a string -// map of int8 values -func Int8ValueMap(src map[string]*int8) map[string]int8 { - dst := make(map[string]int8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int16 returns a pointer to the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Value returns the value of the int16 pointer passed in or -// 0 if the pointer is nil. 
-func Int16Value(v *int16) int16 { - if v != nil { - return *v - } - return 0 -} - -// Int16Slice converts a slice of int16 values into a slice of -// int16 pointers -func Int16Slice(src []int16) []*int16 { - dst := make([]*int16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int16ValueSlice converts a slice of int16 pointers into a slice of -// int16 values -func Int16ValueSlice(src []*int16) []int16 { - dst := make([]int16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int16Map converts a string map of int16 values into a string -// map of int16 pointers -func Int16Map(src map[string]int16) map[string]*int16 { - dst := make(map[string]*int16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int16ValueMap converts a string map of int16 pointers into a string -// map of int16 values -func Int16ValueMap(src map[string]*int16) map[string]int16 { - dst := make(map[string]int16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. -func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint8 returns a pointer to the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Value returns the value of the uint8 pointer passed in or -// 0 if the pointer is nil. -func Uint8Value(v *uint8) uint8 { - if v != nil { - return *v - } - return 0 -} - -// Uint8Slice converts a slice of uint8 values into a slice of -// uint8 pointers -func Uint8Slice(src []uint8) []*uint8 { - dst := make([]*uint8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint8ValueSlice converts a slice of uint8 pointers into a slice of -// uint8 values -func Uint8ValueSlice(src []*uint8) []uint8 { - dst := make([]uint8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint8Map converts a string map of uint8 values into a string -// map of uint8 pointers -func Uint8Map(src map[string]uint8) map[string]*uint8 { - dst := make(map[string]*uint8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint8ValueMap converts a string map of uint8 pointers into a string -// map of uint8 values -func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { - dst := make(map[string]uint8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. 
-func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. 
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. -func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. 
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// SecondsTimeValue converts an int64 pointer to a time.Time value -// representing seconds since Epoch or time.Time{} if the pointer is nil. -func SecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix((*v / 1000), 0) - } - return time.Time{} -} - -// MillisecondsTimeValue converts an int64 pointer to a time.Time value -// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. -func MillisecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix(0, (*v * 1000000)) - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index aa902d70..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,230 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - if r.Body != nil { - var err error - length, err = aws.SeekerLen(r.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) - return - } - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. 
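Aside: the convert_types.go helpers deleted above are the SDK's scalar/pointer conversion utilities. A minimal sketch of how code that previously built against this vendored copy typically uses them; the values are placeholders, and the short context timeout is only there to show SleepWithContext returning early.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// AWS API structs take pointers, so aws.String / aws.Int64 wrap literals
	// and the *Value helpers dereference possibly-nil results safely.
	name := aws.String("example-bucket") // *string
	fmt.Println(aws.StringValue(name))   // "example-bucket"
	fmt.Println(aws.Int64Value(nil))     // 0, no panic on nil

	// Slice and map variants convert in bulk.
	ids := aws.StringSlice([]string{"a", "b"}) // []*string
	fmt.Println(len(aws.StringValueSlice(ids)))

	// SleepWithContext waits for the duration unless the context ends first,
	// in which case the context's error is returned.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	if err := aws.SleepWithContext(ctx, time.Second); err != nil {
		fmt.Println("sleep cut short:", err)
	}
}
```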
-var ValidateReqSigHandler = request.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *request.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == credentials.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 5 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(5 * time.Minute).After(time.Now()) { - return - } - - fmt.Println("request expired, resigning") - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *request.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if request.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *request.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *request.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all request errors, and let the default retrier determine - // if the error is retryable. - r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. 
-var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{ - Name: "core.AfterRetryHandler", - Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } - }} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. -var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b155..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go deleted file mode 100644 index ab69c7a6..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package corehandlers - -import ( - "os" - "runtime" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version -// to the user agent. 
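Aside: none of the corehandlers above are invoked directly; they are pushed onto a request.Handlers chain, roughly as the SDK's defaults package does. A hedged sketch of that wiring follows; the exact default ordering may differ, so treat this as an approximation rather than the SDK's own defaults.Handlers.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/request"
)

// buildHandlers assembles an approximate default handler chain from the
// corehandlers removed in this change.
func buildHandlers() request.Handlers {
	var h request.Handlers
	h.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
	h.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
	h.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
	h.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	h.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
	h.Send.PushBackNamed(corehandlers.SendHandler)
	h.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
	h.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
	return h
}

func main() { _ = buildHandlers() }
```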
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec-env` - -// AddHostExecEnvUserAgentHander is a request handler appending the SDK's -// execution environment to the user agent. -// -// If the environment variable AWS_EXECUTION_ENV is set, its value will be -// appended to the user agent string. -var AddHostExecEnvUserAgentHander = request.NamedHandler{ - Name: "core.AddHostExecEnvUserAgentHander", - Fn: func(r *request.Request) { - v := os.Getenv(execEnvVar) - if len(v) == 0 { - return - } - - request.AddToUserAgent(r, execEnvUAKey+"/"+v) - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index 3ad1e798..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true. - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: creds, -// }))) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. 
-// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index 4af59215..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,299 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := credentials.NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "fmt" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. 
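Aside: when every provider in the chain fails, the deprecated terse error above is returned unless verbose chain errors are enabled. A sketch of opting in through aws.Config; the session usage here is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// With CredentialsChainVerboseErrors enabled, a fully failed chain
	// reports each provider's error instead of the terse deprecated message.
	sess, err := session.NewSession(&aws.Config{
		CredentialsChainVerboseErrors: aws.Bool(true),
	})
	if err != nil {
		panic(err)
	}

	if _, err := sess.Config.Credentials.Get(); err != nil {
		fmt.Println("no credentials resolved:", err)
	}
}
```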
-// -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// HasKeys returns if the credentials Value has both AccessKeyID and -// SecretAccessKey value set. -func (v Value) HasKeys() bool { - return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// An Expirer is an interface that Providers can implement to expose the expiration -// time, if known. If the Provider cannot accurately provide this info, -// it should not implement this interface. -type Expirer interface { - // The time at which the credentials are no longer valid - ExpiresAt() time.Time -} - -// An ErrorProvider is a stub credentials provider that always returns an error -// this is used by the SDK when construction a known provider is not possible -// due to an error. -type ErrorProvider struct { - // The error to be returned from Retrieve - Err error - - // The provider name to set on the Retrieved returned Value - ProviderName string -} - -// Retrieve will always return the error that the ErrorProvider was created with. -func (p ErrorProvider) Retrieve() (Value, error) { - return Value{ProviderName: p.ProviderName}, p.Err -} - -// IsExpired will always return not expired. -func (p ErrorProvider) IsExpired() bool { - return false -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. 
-func (e *Expiry) IsExpired() bool { - curTime := e.CurrentTime - if curTime == nil { - curTime = time.Now - } - return e.expiration.Before(curTime()) -} - -// ExpiresAt returns the expiration time of the credential -func (e *Expiry) ExpiresAt() time.Time { - return e.expiration -} - -// A Credentials provides concurrency safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - creds Value - forceRefresh bool - - m sync.RWMutex - - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - // Check the cached credentials first with just the read lock. - c.m.RLock() - if !c.isExpired() { - creds := c.creds - c.m.RUnlock() - return creds, nil - } - c.m.RUnlock() - - // Credentials are expired need to retrieve the credentials taking the full - // lock. - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} - -// ExpiresAt provides access to the functionality of the Expirer interface of -// the underlying Provider, if it supports that interface. Otherwise, it returns -// an error. 
-func (c *Credentials) ExpiresAt() (time.Time, error) { - c.m.RLock() - defer c.m.RUnlock() - - expirer, ok := c.provider.(Expirer) - if !ok { - return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), - nil) - } - if c.forceRefresh { - // set expiration time to the distant past - return time.Time{}, nil - } - return expirer.ExpiresAt(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index 43d4ed38..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,180 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. -func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. 
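Aside: the Expiry struct above exists so custom providers do not re-implement expiration bookkeeping. A minimal sketch of a hypothetical provider that embeds it; the returned keys are placeholders.

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// tokenProvider is a hypothetical credentials.Provider that embeds
// credentials.Expiry to get IsExpired and SetExpiration for free.
type tokenProvider struct {
	credentials.Expiry
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// Pretend short-lived keys were fetched from somewhere; expire them a
	// minute early so callers refresh before the hard deadline.
	p.SetExpiration(time.Now().Add(15*time.Minute), time.Minute)
	return credentials.Value{
		AccessKeyID:     "AKID-EXAMPLE",
		SecretAccessKey: "SECRET-EXAMPLE",
		ProviderName:    "tokenProvider",
	}, nil
}

func main() {
	creds := credentials.NewCredentials(&tokenProvider{})
	if _, err := creds.Get(); err != nil { // cached until the window elapses
		panic(err)
	}
}
```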
-// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New(request.ErrCodeSerialization, - "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New(request.ErrCodeSerialization, - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go deleted file mode 100644 index 1a7af53a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,203 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. 
-// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "encoding/json" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -// Provider satisfies the credentials.Provider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - staticCreds bool - credentials.Expiry - - // Requires a AWS Client to make HTTP requests to the endpoint with. - // the Endpoint the request will be made to is provided by the aws.Config's - // Endpoint value. - Client *client.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // Optional authorization token value if set will be used as the value of - // the Authorization header of the endpoint credential request. - AuthorizationToken string -} - -// NewProviderClient returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { - p := &Provider{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: "CredentialsEndpoint", - Endpoint: endpoint, - }, - handlers, - ), - } - - p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) - p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) - p.Client.Handlers.Validate.Clear() - p.Client.Handlers.Validate.PushBack(validateEndpointHandler) - - for _, option := range options { - option(p) - } - - return p -} - -// NewCredentialsClient returns a pointer to a new Credentials object -// wrapping the endpoint credentials Provider. 
-func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { - return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *Provider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve() (credentials.Value, error) { - resp, err := p.getCredentials() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("CredentialsEndpointError", "failed to load credentials", err) - } - - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } else { - p.staticCreds = true - } - - return credentials.Value{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - ProviderName: ProviderName, - }, nil -} - -type getCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -type errorOutput struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { - op := &request.Operation{ - Name: "GetCredentials", - HTTPMethod: "GET", - } - - out := &getCredentialsOutput{} - req := p.Client.NewRequest(op, nil, out) - req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { - req.HTTPRequest.Header.Set("Authorization", authToken) - } - - return out, req.Send() -} - -func validateEndpointHandler(r *request.Request) { - if len(r.ClientInfo.Endpoint) == 0 { - r.Error = aws.ErrMissingEndpoint - } -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - out := r.Data.(*getCredentialsOutput) - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to decode endpoint credentials", - err, - ) - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var errOut errorOutput - err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New(errOut.Code, errOut.Message, nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index 54c5cf73..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,74 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. 
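Aside: a hedged sketch of wiring the endpointcreds provider deleted above against a local HTTP endpoint. The URL and token are illustrative, and defaults.Config/defaults.Handlers are assumed to supply the client plumbing the provider expects.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	// Hypothetical local endpoint that serves the JSON credential document
	// described in the package comment above.
	creds := endpointcreds.NewCredentialsClient(*cfg, handlers,
		"http://127.0.0.1:8080/creds",
		func(p *endpointcreds.Provider) {
			p.ExpiryWindow = 30 * time.Second
			p.AuthorizationToken = "example-token" // optional Authorization header
		})

	v, err := creds.Get()
	if err != nil {
		fmt.Println("endpoint credentials unavailable:", err)
		return
	}
	fmt.Println("retrieved via", v.ProviderName)
}
```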
- ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. -// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go deleted file mode 100644 index e6248360..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ /dev/null @@ -1,426 +0,0 @@ -/* -Package processcreds is a credential Provider to retrieve `credential_process` -credentials. - -WARNING: The following describes a method of sourcing credentials from an external -process. This can potentially be dangerous, so proceed with caution. Other -credential providers should be preferred if at all possible. If using this -option, you should make sure that the config file is as locked down as possible -using security best practices for your operating system. - -You can use credentials from a `credential_process` in a variety of ways. - -One way is to setup your shared config file, located in the default -location, with the `credential_process` key and the command you want to be -called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. 
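Aside: the environment provider above is the simplest of these providers. A minimal usage sketch; the exported key values are placeholders only.

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholders only; real code relies on the environment already being
	// populated by the shell or the runtime.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID-EXAMPLE")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET-EXAMPLE")

	creds := credentials.NewEnvCredentials()
	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.ProviderName) // "EnvProvider"

	// Environment credentials never expire on their own.
	fmt.Println(creds.IsExpired()) // false after a successful Get
}
```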
- - [default] - credential_process = /command/to/call - -Creating a new session will use the credential process to retrieve credentials. -NOTE: If there are credentials in the profile you are using, the credential -process will not be used. - - // Initialize a session to load credentials. - sess, _ := session.NewSession(&aws.Config{ - Region: aws.String("us-east-1")}, - ) - - // Create S3 service client to use the credentials. - svc := s3.New(sess) - -Another way to use the `credential_process` method is by using -`credentials.NewCredentials()` and providing a command to be executed to -retrieve credentials: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentials("/path/to/command") - - // Create service client value configured for credentials. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -You can set a non-default timeout for the `credential_process` with another -constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To -set a one minute timeout: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentialsTimeout( - "/path/to/command", - time.Duration(500) * time.Millisecond) - -If you need more control, you can set any configurable options in the -credentials using one or more option functions. For example, you can set a two -minute timeout, a credential duration of 60 minutes, and a maximum stdout -buffer size of 2k. - - creds := processcreds.NewCredentials( - "/path/to/command", - func(opt *ProcessProvider) { - opt.Timeout = time.Duration(2) * time.Minute - opt.Duration = time.Duration(60) * time.Minute - opt.MaxBufSize = 2048 - }) - -You can also use your own `exec.Cmd`: - - // Create an exec.Cmd - myCommand := exec.Command("/path/to/command") - - // Create credentials using your exec.Cmd and custom timeout - creds := processcreds.NewCredentialsCommand( - myCommand, - func(opt *processcreds.ProcessProvider) { - opt.Timeout = time.Duration(1) * time.Second - }) -*/ -package processcreds - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. 
- ProviderName = `ProcessProvider` - - // ErrCodeProcessProviderParse error parsing process output - ErrCodeProcessProviderParse = "ProcessProviderParseError" - - // ErrCodeProcessProviderVersion version error in output - ErrCodeProcessProviderVersion = "ProcessProviderVersionError" - - // ErrCodeProcessProviderRequired required attribute missing in output - ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" - - // ErrCodeProcessProviderExecution execution of command failed - ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" - - // errMsgProcessProviderTimeout process took longer than allowed - errMsgProcessProviderTimeout = "credential process timed out" - - // errMsgProcessProviderProcess process error - errMsgProcessProviderProcess = "error in credential_process" - - // errMsgProcessProviderParse problem parsing output - errMsgProcessProviderParse = "parse failed of credential_process output" - - // errMsgProcessProviderVersion version error in output - errMsgProcessProviderVersion = "wrong version in process output (not 1)" - - // errMsgProcessProviderMissKey missing access key id in output - errMsgProcessProviderMissKey = "missing AccessKeyId in process output" - - // errMsgProcessProviderMissSecret missing secret acess key in output - errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" - - // errMsgProcessProviderPrepareCmd prepare of command failed - errMsgProcessProviderPrepareCmd = "failed to prepare command" - - // errMsgProcessProviderEmptyCmd command must not be empty - errMsgProcessProviderEmptyCmd = "command must not be empty" - - // errMsgProcessProviderPipe failed to initialize pipe - errMsgProcessProviderPipe = "failed to initialize pipe" - - // DefaultDuration is the default amount of time in minutes that the - // credentials will be valid for. - DefaultDuration = time.Duration(15) * time.Minute - - // DefaultBufSize limits buffer size from growing to an enormous - // amount due to a faulty process. - DefaultBufSize = int(8 * sdkio.KibiByte) - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProcessProvider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type ProcessProvider struct { - staticCreds bool - credentials.Expiry - originalCommand []string - - // Expiry duration of the credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // A string representing an os command that should return a JSON with - // credential information. - command *exec.Cmd - - // MaxBufSize limits memory usage from growing to an enormous - // amount due to a faulty process. - MaxBufSize int - - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// ProcessProvider. The credentials will expire every 15 minutes by default. 
-func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: exec.Command(command), - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsTimeout returns a pointer to a new Credentials object with -// the specified command and timeout, and default duration and max buffer size. -func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { - p := NewCredentials(command, func(opt *ProcessProvider) { - opt.Timeout = timeout - }) - - return p -} - -// NewCredentialsCommand returns a pointer to a new Credentials object with -// the specified command, and default timeout, duration and max buffer size. -func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: command, - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - SessionToken string - Expiration *time.Time -} - -// Retrieve executes the 'credential_process' and returns the credentials. -func (p *ProcessProvider) Retrieve() (credentials.Value, error) { - out, err := p.executeCredentialProcess() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // Serialize and validate response - resp := &credentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderParse, - fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), - err) - } - - if resp.Version != 1 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderVersion, - errMsgProcessProviderVersion, - nil) - } - - if len(resp.AccessKeyID) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissKey, - nil) - } - - if len(resp.SecretAccessKey) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissSecret, - nil) - } - - // Handle expiration - p.staticCreds = resp.Expiration == nil - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } - - return credentials.Value{ - ProviderName: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - }, nil -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *ProcessProvider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// prepareCommand prepares the command to be executed. 
-func (p *ProcessProvider) prepareCommand() error { - - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(p.originalCommand) == 0 { - p.originalCommand = make([]string, len(p.command.Args)) - copy(p.originalCommand, p.command.Args) - - // check for empty command because it succeeds - if len(strings.TrimSpace(p.originalCommand[0])) < 1 { - return awserr.New( - ErrCodeProcessProviderExecution, - fmt.Sprintf( - "%s: %s", - errMsgProcessProviderPrepareCmd, - errMsgProcessProviderEmptyCmd), - nil) - } - } - - cmdArgs = append(cmdArgs, p.originalCommand...) - p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) - p.command.Env = os.Environ() - - return nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { - - if err := p.prepareCommand(); err != nil { - return nil, err - } - - // Setup the pipes - outReadPipe, outWritePipe, err := os.Pipe() - if err != nil { - return nil, awserr.New( - ErrCodeProcessProviderExecution, - errMsgProcessProviderPipe, - err) - } - - p.command.Stderr = os.Stderr // display stderr on console for MFA - p.command.Stdout = outWritePipe // get creds json on process's stdout - p.command.Stdin = os.Stdin // enable stdin for MFA - - output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) - - stdoutCh := make(chan error, 1) - go readInput( - io.LimitReader(outReadPipe, int64(p.MaxBufSize)), - output, - stdoutCh) - - execCh := make(chan error, 1) - go executeCommand(*p.command, execCh) - - finished := false - var errors []error - for !finished { - select { - case readError := <-stdoutCh: - errors = appendError(errors, readError) - finished = true - case execError := <-execCh: - err := outWritePipe.Close() - errors = appendError(errors, err) - errors = appendError(errors, execError) - if errors != nil { - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderProcess, - errors) - } - case <-time.After(p.Timeout): - finished = true - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderTimeout, - errors) // errors can be nil - } - } - - out := output.Bytes() - - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) - } - - return out, nil -} - -// appendError conveniently checks for nil before appending slice -func appendError(errors []error, err error) []error { - if err != nil { - return append(errors, err) - } - return errors -} - -func executeCommand(cmd exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} - -func readInput(r io.Reader, w io.Writer, read chan error) { - tee := io.TeeReader(r, w) - - _, err := ioutil.ReadAll(tee) - - if err == io.EOF { - err = nil - } - - read <- err // will only arrive here when write end of pipe is closed -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index e1551495..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,150 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/internal/ini" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. 
-func loadProfile(filename, profile string) (Value, error) { - config, err := ini.OpenFile(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - - iniProfile, ok := config.GetSection(profile) - if !ok { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) - } - - id := iniProfile.String("aws_access_key_id") - if len(id) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - nil) - } - - secret := iniProfile.String("aws_secret_access_key") - if len(secret) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.String("aws_session_token") - - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if len(p.Filename) != 0 { - return p.Filename, nil - } - - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { - return p.Filename, nil - } - - if home := shareddefaults.UserHomeDir(); len(home) == 0 { - // Backwards compatibility of home directly not found error being returned. - // This error is too verbose, failure when opening the file would of been - // a better error to return. - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = shareddefaults.SharedCredentialsFilename() - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". -func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index 531139e3..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,55 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. 
-func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object -// wrapping the static credentials value provide. Same as NewStaticCredentials -// but takes the creds Value instead of individual fields -func NewStaticCredentialsFromCreds(creds Value) *Credentials { - return NewCredentials(&StaticProvider{Value: creds}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - if len(s.Value.ProviderName) == 0 { - s.Value.ProviderName = StaticProviderName - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 9f37f44b..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Package stscreds are credential Providers to retrieve STS AWS credentials. - -STS provides multiple ways to retrieve credentials which can be used when making -future AWS service API operation calls. - -The SDK will ensure that per instance of credentials.Credentials all requests -to refresh the credentials will be synchronized. But, the SDK is unable to -ensure synchronous usage of the AssumeRoleProvider if the value is shared -between multiple Credentials, Sessions or service clients. - -Assume Role - -To assume an IAM role using STS with the SDK you can create a new Credentials -with the SDKs's stscreds package. - - // Initial credentials loaded from SDK's default credential chain. Such as - // the environment, shared credentials (~/.aws/credentials), or EC2 Instance - // Role. These credentials will be used to to make the STS Assume Role API. - sess := session.Must(session.NewSession()) - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. - creds := stscreds.NewCredentials(sess, "myRoleArn") - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with static MFA Token - -To assume an IAM role with a MFA token you can either specify a MFA token code -directly or provide a function to prompt the user each time the credentials -need to refresh the role's credentials. Specifying the TokenCode should be used -for short lived operations that will not need to be refreshed, and when you do -not want to have direct control over the user provides their MFA token. - -With TokenCode the AssumeRoleProvider will be not be able to refresh the role's -credentials. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN using the MFA token code provided. 
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenCode = aws.String("00000000") - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with MFA Token Provider - -To assume an IAM role with MFA for longer running tasks where the credentials -may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -will allow the credential provider to prompt for new MFA token code when the -role's credentials need to be refreshed. - -The StdinTokenProvider function is available to prompt on stdin to retrieve -the MFA token code from the user. You can also implement custom prompts by -satisfing the TokenProvider function signature. - -Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -have undesirable results as the StdinTokenProvider will not be synchronized. A -single Credentials with an AssumeRoleProvider can be shared safely. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. - creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenProvider = stscreds.StdinTokenProvider - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -*/ -package stscreds - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/sdkrand" - "github.com/aws/aws-sdk-go/service/sts" -) - -// StdinTokenProvider will prompt on stderr and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoler represents the minimal subset of the STS client API used by this provider. -type AssumeRoler interface { - AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the credentials -// will be valid for. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. -// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. 
-// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - credentials.Expiry - - // STS client to make assume role request with. - Client AssumeRoler - - // Role to be assumed. - RoleARN string - - // Session name, if you wish to reuse the credentials elsewhere. - RoleSessionName string - - // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. - Tags []*sts.Tag - - // A list of keys for session tags that you want to set as transitive. - // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. - TransitiveTagKeys []*string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - SerialNumber *string - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // If SerialNumber is set and neither TokenCode nor TokenProvider are also - // set an error will be returned. - TokenCode *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is also set and - // TokenCode is not set. - // - // If both TokenCode and TokenProvider is set, TokenProvider will be used and - // TokenCode is ignored. - TokenProvider func() (string, error) - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // MaxJitterFrac reduces the effective Duration of each credential requested - // by a random percentage between 0 and MaxJitterFraction. 
MaxJitterFrac must - // have a value between 0 and 1. Any other value may lead to expected behavior. - // With a MaxJitterFrac value of 0, default) will no jitter will be used. - // - // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the - // AssumeRole call will be made with an arbitrary Duration between 27m and - // 30m. - // - // MaxJitterFrac should not be negative. - MaxJitterFrac float64 -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes a Config provider to create the STS client. The ConfigProvider is -// satisfied by the session.Session type. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: sts.New(c), - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// Takes an AssumeRoler which can be satisfied by the STS client. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: svc, - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - // Apply defaults where parameters are not set. - if p.RoleSessionName == "" { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. 
- p.Duration = DefaultDuration - } - jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, - Tags: p.Tags, - TransitiveTagKeys: p.TransitiveTagKeys, - } - if p.Policy != nil { - input.Policy = p.Policy - } - if p.SerialNumber != nil { - if p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode - } else if p.TokenProvider != nil { - input.SerialNumber = p.SerialNumber - code, err := p.TokenProvider() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - input.TokenCode = aws.String(code) - } else { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("AssumeRoleTokenNotAvailable", - "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) - } - } - - roleOutput, err := p.Client.AssumeRole(input) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // We will proactively generate new credentials before they expire. - p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: *roleOutput.Credentials.AccessKeyId, - SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, - SessionToken: *roleOutput.Credentials.SessionToken, - ProviderName: ProviderName, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index b20b6339..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package stscreds - -import ( - "fmt" - "io/ioutil" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" -) - -const ( - // ErrCodeWebIdentity will be used as an error code when constructing - // a new error to be returned during session creation or retrieval. - ErrCodeWebIdentity = "WebIdentityErr" - - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// now is used to return a time.Time object representing -// the current time. This can be used to easily test and -// compare test values. -var now = time.Now - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - credentials.Expiry - - client stsiface.STSAPI - ExpiryWindow time.Duration - - tokenFilePath string - roleARN string - roleSessionName string -} - -// NewWebIdentityCredentials will return a new set of credentials with a given -// configuration, role arn, and token file path. 
-func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { - svc := sts.New(c) - p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) - return credentials.NewCredentials(p) -} - -// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the -// provided stsiface.STSAPI -func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { - return &WebIdentityRoleProvider{ - client: svc, - tokenFilePath: path, - roleARN: roleARN, - roleSessionName: roleSessionName, - } -} - -// Retrieve will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { - b, err := ioutil.ReadFile(p.tokenFilePath) - if err != nil { - errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath) - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err) - } - - sessionName := p.roleSessionName - if len(sessionName) == 0 { - // session name is used to uniquely identify a session. This simply - // uses unix time in nanoseconds to uniquely identify sessions. - sessionName = strconv.FormatInt(now().UnixNano(), 10) - } - req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ - RoleArn: &p.roleARN, - RoleSessionName: &sessionName, - WebIdentityToken: aws.String(string(b)), - }) - // InvalidIdentityToken error is a temporary error that can occur - // when assuming an Role with a JWT web identity token. - req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) - if err := req.Send(); err != nil { - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) - } - - p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) - - value := credentials.Value{ - AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), - SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), - SessionToken: aws.StringValue(resp.Credentials.SessionToken), - ProviderName: WebIdentityProviderName, - } - return value, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go deleted file mode 100644 index 25a66d1d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package csm provides the Client Side Monitoring (CSM) client which enables -// sending metrics via UDP connection to the CSM agent. This package provides -// control options, and configuration for the CSM client. The client can be -// controlled manually, or automatically via the SDK's Session configuration. -// -// Enabling CSM client via SDK's Session configuration -// -// The CSM client can be enabled automatically via SDK's Session configuration. -// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT -// environment variable is set to a non-empty value. -// -// The configuration options for the CSM client via the SDK's session -// configuration are: -// -// * AWS_CSM_PORT= -// The port number the CSM agent will receive metrics on. -// -// * AWS_CSM_HOST= -// The hostname, or IP address the CSM agent will receive metrics on. -// Without port number. -// -// Manually enabling the CSM client -// -// The CSM client can be started, paused, and resumed manually. 
The Start -// function will enable the CSM client to publish metrics to the CSM agent. It -// is safe to call Start concurrently, but if Start is called additional times -// with different ClientID or address it will panic. -// -// r, err := csm.Start("clientID", ":31000") -// if err != nil { -// panic(fmt.Errorf("failed starting CSM: %v", err)) -// } -// -// When controlling the CSM client manually, you must also inject its request -// handlers into the SDK's Session configuration for the SDK's API clients to -// publish metrics. -// -// sess, err := session.NewSession(&aws.Config{}) -// if err != nil { -// panic(fmt.Errorf("failed loading session: %v", err)) -// } -// -// // Add CSM client's metric publishing request handlers to the SDK's -// // Session Configuration. -// r.InjectHandlers(&sess.Handlers) -// -// Controlling CSM client -// -// Once the CSM client has been enabled the Get function will return a Reporter -// value that you can use to pause and resume the metrics published to the CSM -// agent. If Get function is called before the reporter is enabled with the -// Start function or via SDK's Session configuration nil will be returned. -// -// The Pause method can be called to stop the CSM client publishing metrics to -// the CSM agent. The Continue method will resume metric publishing. -// -// // Get the CSM client Reporter. -// r := csm.Get() -// -// // Will pause monitoring -// r.Pause() -// resp, err = client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -// -// // Resume monitoring -// r.Continue() -package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go deleted file mode 100644 index 4b19e280..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ /dev/null @@ -1,89 +0,0 @@ -package csm - -import ( - "fmt" - "strings" - "sync" -) - -var ( - lock sync.Mutex -) - -const ( - // DefaultPort is used when no port is specified. - DefaultPort = "31000" - - // DefaultHost is the host that will be used when none is specified. - DefaultHost = "127.0.0.1" -) - -// AddressWithDefaults returns a CSM address built from the host and port -// values. If the host or port is not set, default values will be used -// instead. If host is "localhost" it will be replaced with "127.0.0.1". -func AddressWithDefaults(host, port string) string { - if len(host) == 0 || strings.EqualFold(host, "localhost") { - host = DefaultHost - } - - if len(port) == 0 { - port = DefaultPort - } - - // Only IP6 host can contain a colon - if strings.Contains(host, ":") { - return "[" + host + "]:" + port - } - - return host + ":" + port -} - -// Start will start a long running go routine to capture -// client side metrics. Calling start multiple time will only -// start the metric listener once and will panic if a different -// client ID or port is passed in. -// -// r, err := csm.Start("clientID", "127.0.0.1:31000") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// sess := session.NewSession() -// r.InjectHandlers(sess.Handlers) -// -// svc := s3.New(sess) -// out, err := svc.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -func Start(clientID string, url string) (*Reporter, error) { - lock.Lock() - defer lock.Unlock() - - if sender == nil { - sender = newReporter(clientID, url) - } else { - if sender.clientID != clientID { - panic(fmt.Errorf("inconsistent client IDs. 
%q was expected, but received %q", sender.clientID, clientID)) - } - - if sender.url != url { - panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) - } - } - - if err := connect(url); err != nil { - sender = nil - return nil, err - } - - return sender, nil -} - -// Get will return a reporter if one exists, if one does not exist, nil will -// be returned. -func Get() *Reporter { - lock.Lock() - defer lock.Unlock() - - return sender -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go deleted file mode 100644 index 5bacc791..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ /dev/null @@ -1,109 +0,0 @@ -package csm - -import ( - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" -) - -type metricTime time.Time - -func (t metricTime) MarshalJSON() ([]byte, error) { - ns := time.Duration(time.Time(t).UnixNano()) - return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil -} - -type metric struct { - ClientID *string `json:"ClientId,omitempty"` - API *string `json:"Api,omitempty"` - Service *string `json:"Service,omitempty"` - Timestamp *metricTime `json:"Timestamp,omitempty"` - Type *string `json:"Type,omitempty"` - Version *int `json:"Version,omitempty"` - - AttemptCount *int `json:"AttemptCount,omitempty"` - Latency *int `json:"Latency,omitempty"` - - Fqdn *string `json:"Fqdn,omitempty"` - UserAgent *string `json:"UserAgent,omitempty"` - AttemptLatency *int `json:"AttemptLatency,omitempty"` - - SessionToken *string `json:"SessionToken,omitempty"` - Region *string `json:"Region,omitempty"` - AccessKey *string `json:"AccessKey,omitempty"` - HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` - XAmzID2 *string `json:"XAmzId2,omitempty"` - XAmzRequestID *string `json:"XAmznRequestId,omitempty"` - - AWSException *string `json:"AwsException,omitempty"` - AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` - SDKException *string `json:"SdkException,omitempty"` - SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` - - FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` - FinalAWSException *string `json:"FinalAwsException,omitempty"` - FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` - FinalSDKException *string `json:"FinalSdkException,omitempty"` - FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` - - DestinationIP *string `json:"DestinationIp,omitempty"` - ConnectionReused *int `json:"ConnectionReused,omitempty"` - - AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` - ConnectLatency *int `json:"ConnectLatency,omitempty"` - RequestLatency *int `json:"RequestLatency,omitempty"` - DNSLatency *int `json:"DnsLatency,omitempty"` - TCPLatency *int `json:"TcpLatency,omitempty"` - SSLLatency *int `json:"SslLatency,omitempty"` - - MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` -} - -func (m *metric) TruncateFields() { - m.ClientID = truncateString(m.ClientID, 255) - m.UserAgent = truncateString(m.UserAgent, 256) - - m.AWSException = truncateString(m.AWSException, 128) - m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) - - m.SDKException = truncateString(m.SDKException, 128) - m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) - - m.FinalAWSException = truncateString(m.FinalAWSException, 128) - m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) - - m.FinalSDKException = 
truncateString(m.FinalSDKException, 128) - m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) -} - -func truncateString(v *string, l int) *string { - if v != nil && len(*v) > l { - nv := (*v)[:l] - return &nv - } - - return v -} - -func (m *metric) SetException(e metricException) { - switch te := e.(type) { - case awsException: - m.AWSException = aws.String(te.exception) - m.AWSExceptionMessage = aws.String(te.message) - case sdkException: - m.SDKException = aws.String(te.exception) - m.SDKExceptionMessage = aws.String(te.message) - } -} - -func (m *metric) SetFinalException(e metricException) { - switch te := e.(type) { - case awsException: - m.FinalAWSException = aws.String(te.exception) - m.FinalAWSExceptionMessage = aws.String(te.message) - case sdkException: - m.FinalSDKException = aws.String(te.exception) - m.FinalSDKExceptionMessage = aws.String(te.message) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go deleted file mode 100644 index 82a3e345..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ /dev/null @@ -1,55 +0,0 @@ -package csm - -import ( - "sync/atomic" -) - -const ( - runningEnum = iota - pausedEnum -) - -var ( - // MetricsChannelSize of metrics to hold in the channel - MetricsChannelSize = 100 -) - -type metricChan struct { - ch chan metric - paused *int64 -} - -func newMetricChan(size int) metricChan { - return metricChan{ - ch: make(chan metric, size), - paused: new(int64), - } -} - -func (ch *metricChan) Pause() { - atomic.StoreInt64(ch.paused, pausedEnum) -} - -func (ch *metricChan) Continue() { - atomic.StoreInt64(ch.paused, runningEnum) -} - -func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(ch.paused) - return v == pausedEnum -} - -// Push will push metrics to the metric channel if the channel -// is not paused -func (ch *metricChan) Push(m metric) bool { - if ch.IsPaused() { - return false - } - - select { - case ch.ch <- m: - return true - default: - return false - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go deleted file mode 100644 index 54a99280..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go +++ /dev/null @@ -1,26 +0,0 @@ -package csm - -type metricException interface { - Exception() string - Message() string -} - -type requestException struct { - exception string - message string -} - -func (e requestException) Exception() string { - return e.exception -} -func (e requestException) Message() string { - return e.message -} - -type awsException struct { - requestException -} - -type sdkException struct { - requestException -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go deleted file mode 100644 index 835bcd49..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ /dev/null @@ -1,264 +0,0 @@ -package csm - -import ( - "encoding/json" - "net" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Reporter will gather metrics of API requests made and -// send those metrics to the CSM endpoint. 
-type Reporter struct { - clientID string - url string - conn net.Conn - metricsCh metricChan - done chan struct{} -} - -var ( - sender *Reporter -) - -func connect(url string) error { - const network = "udp" - if err := sender.connect(network, url); err != nil { - return err - } - - if sender.done == nil { - sender.done = make(chan struct{}) - go sender.start() - } - - return nil -} - -func newReporter(clientID, url string) *Reporter { - return &Reporter{ - clientID: clientID, - url: url, - metricsCh: newMetricChan(MetricsChannelSize), - } -} - -func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - creds, _ := r.Config.Credentials.Get() - - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Region: r.Config.Region, - Type: aws.String("ApiCallAttempt"), - Version: aws.Int(1), - - XAmzRequestID: aws.String(r.RequestID), - - AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), - AccessKey: aws.String(creds.AccessKeyID), - } - - if r.HTTPResponse != nil { - m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetException(getMetricException(awserr)) - } - } - - m.TruncateFields() - rep.metricsCh.Push(m) -} - -func getMetricException(err awserr.Error) metricException { - msg := err.Error() - code := err.Code() - - switch code { - case request.ErrCodeRequestError, - request.ErrCodeSerialization, - request.CanceledErrorCode: - return sdkException{ - requestException{exception: code, message: msg}, - } - default: - return awsException{ - requestException{exception: code, message: msg}, - } - } -} - -func (rep *Reporter) sendAPICallMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Type: aws.String("ApiCall"), - AttemptCount: aws.Int(r.RetryCount + 1), - Region: r.Config.Region, - Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), - XAmzRequestID: aws.String(r.RequestID), - MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), - } - - if r.HTTPResponse != nil { - m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetFinalException(getMetricException(awserr)) - } - } - - m.TruncateFields() - - // TODO: Probably want to figure something out for logging dropped - // metrics - rep.metricsCh.Push(m) -} - -func (rep *Reporter) connect(network, url string) error { - if rep.conn != nil { - rep.conn.Close() - } - - conn, err := net.Dial(network, url) - if err != nil { - return awserr.New("UDPError", "Could not connect", err) - } - - rep.conn = conn - - return nil -} - -func (rep *Reporter) close() { - if rep.done != nil { - close(rep.done) - } - - rep.metricsCh.Pause() -} - -func (rep *Reporter) start() { - defer func() { - rep.metricsCh.Pause() - }() - - for { - select { - case <-rep.done: - rep.done = nil - return - case m := <-rep.metricsCh.ch: - // TODO: What to do with this error? 
Probably should just log - b, err := json.Marshal(m) - if err != nil { - continue - } - - rep.conn.Write(b) - } - } -} - -// Pause will pause the metric channel preventing any new metrics from being -// added. It is safe to call concurrently with other calls to Pause, but if -// called concurently with Continue can lead to unexpected state. -func (rep *Reporter) Pause() { - lock.Lock() - defer lock.Unlock() - - if rep == nil { - return - } - - rep.close() -} - -// Continue will reopen the metric channel and allow for monitoring to be -// resumed. It is safe to call concurrently with other calls to Continue, but -// if called concurently with Pause can lead to unexpected state. -func (rep *Reporter) Continue() { - lock.Lock() - defer lock.Unlock() - if rep == nil { - return - } - - if !rep.metricsCh.IsPaused() { - return - } - - rep.metricsCh.Continue() -} - -// Client side metric handler names -const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" -) - -// InjectHandlers will will enable client side metrics and inject the proper -// handlers to handle how metrics are sent. -// -// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers -// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). -// -// // Start must be called in order to inject the correct handlers -// r, err := csm.Start("clientID", "127.0.0.1:8094") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// -// sess := session.NewSession() -// r.InjectHandlers(&sess.Handlers) -// -// // create a new service client with our client side metric session -// svc := s3.New(sess) -func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { - if rep == nil { - return - } - - handlers.Complete.PushFrontNamed(request.NamedHandler{ - Name: APICallMetricHandlerName, - Fn: rep.sendAPICallMetric, - }) - - handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ - Name: APICallAttemptMetricHandlerName, - Fn: rep.sendAPICallAttemptMetric, - }) -} - -// boolIntValue return 1 for true and 0 for false. -func boolIntValue(b bool) int { - if b { - return 1 - } - - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 23bb639e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. -// -// Generally this package shouldn't be used directly, but session.Session -// instead. This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// A Defaults provides a collection of default values for SDK clients. 
-type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. -func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithEndpointResolver(endpoints.DefaultResolver()) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. -func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: CredProviders(cfg, handlers), - }) -} - -// CredProviders returns the slice of providers used in -// the default credential chain. -// -// For applications that need to use some other provider (for example use -// different environment variables for legacy reasons) but still fall back -// on the default chain of providers. This allows that default chaint to be -// automatically updated -func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { - return []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - } -} - -const ( - httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" -) - -// RemoteCredProvider returns a credentials provider for the default remote -// endpoints such as EC2 or ECS Roles. 
-func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { - return localHTTPCredProvider(cfg, handlers, u) - } - - if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) - return httpCredProvider(cfg, handlers, u) - } - - return ec2RoleProvider(cfg, handlers) -} - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - var errMsg string - - parsed, err := url.Parse(u) - if err != nil { - errMsg = fmt.Sprintf("invalid URL, %v", err) - } else { - host := aws.URLHostname(parsed) - if len(host) == 0 { - errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) - } - } - - if len(errMsg) > 0 { - if cfg.Logger != nil { - cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) - } - return credentials.ErrorProvider{ - Err: awserr.New("CredentialsEndpointError", errMsg, err), - ProviderName: endpointcreds.ProviderName, - } - } - - return httpCredProvider(cfg, handlers, u) -} - -func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - return endpointcreds.NewProviderClient(cfg, handlers, u, - func(p *endpointcreds.Provider) { - p.ExpiryWindow = 5 * time.Minute - p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) - }, - ) -} - -func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - resolver := cfg.EndpointResolver - if resolver == nil { - resolver = endpoints.DefaultResolver() - } - - e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") - return &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), - ExpiryWindow: 5 * time.Minute, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go deleted file mode 100644 index ca0ee1dc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go +++ /dev/null @@ -1,27 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return shareddefaults.SharedCredentialsFilename() -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. 
-// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return shareddefaults.SharedConfigFilename() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go deleted file mode 100644 index 4fcb6161..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index 12897eef..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,199 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// getToken uses the duration to return a token for EC2 metadata service, -// or an error if the request failed. 
-func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) { - op := &request.Operation{ - Name: "GetToken", - HTTPMethod: "PUT", - HTTPPath: "/api/token", - } - - var output tokenOutput - req := c.NewRequest(op, nil, &output) - - // remove the fetch token handler from the request handlers to avoid infinite recursion - req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) - - // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. - req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) - - ttl := strconv.FormatInt(int64(duration/time.Second), 10) - req.HTTPRequest.Header.Set(ttlHeader, ttl) - - err := req.Send() - - // Errors with bad request status should be returned. - if err != nil { - err = awserr.NewRequestFailure( - awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), - req.HTTPResponse.StatusCode, req.RequestID) - } - - return output, err -} - -// GetMetadata uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/meta-data", p), - } - output := &metadataOutput{} - - req := c.NewRequest(op, nil, output) - - err := req.Send() - return output.Content, err -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - op := &request.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: "/user-data", - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - err := req.Send() - return output.Content, err -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - err := req.Send() - return output.Content, err -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. 
-func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocument() - if err != nil { - return "", err - } - // extract region from the ec2InstanceIdentityDocument - region := ec2InstanceIdentityDocument.Region - if len(region) == 0 { - return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) - } - // returns region - return region, nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index b8b2940d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,228 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. 
-// -// This package's client can be disabled completely by setting the environment -// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to -// true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environment variable is set to true, (case insensitive). -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "os" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ServiceName is the name of the service. - ServiceName = "ec2metadata" - disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Headers for Token and TTL - ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" - tokenHeader = "x-aws-ec2-metadata-token" - - // Named Handler constants - fetchTokenHandlerName = "FetchTokenHandler" - unmarshalMetadataHandlerName = "unmarshalMetadataHandler" - unmarshalTokenHandlerName = "unmarshalTokenHandler" - enableTokenProviderHandlerName = "enableTokenProviderHandler" - - // TTL constants - defaultTTL = 21600 * time.Second - ttlExpirationWindow = 30 * time.Second -) - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. 
- Timeout: 1 * time.Second, - } - // max number of retries on the client operation - cfg.MaxRetries = aws.Int(2) - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - // token provider instance - tp := newTokenProvider(svc, defaultTTL) - - // NamedHandler for fetching token - svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ - Name: fetchTokenHandlerName, - Fn: tp.fetchTokenHandler, - }) - // NamedHandler for enabling token provider - svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ - Name: enableTokenProviderHandlerName, - Fn: tp.enableTokenProviderHandler, - }) - - svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Disable the EC2 Metadata service if the environment variable is set. - // This short-circuits the service's functionality to always fail to send - // requests. - if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { - svc.Handlers.Send.SwapNamed(request.NamedHandler{ - Name: corehandlers.SendHandler.Name, - Fn: func(r *request.Request) { - r.HTTPResponse = &http.Response{ - Header: http.Header{}, - } - r.Error = awserr.New( - request.CanceledErrorCode, - "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", - nil) - }, - }) - } - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -type tokenOutput struct { - Token string - TTL time.Duration -} - -// unmarshal token handler is used to parse the response of a getToken operation -var unmarshalTokenHandler = request.NamedHandler{ - Name: unmarshalTokenHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - v := r.HTTPResponse.Header.Get(ttlHeader) - data, ok := r.Data.(*tokenOutput) - if !ok { - return - } - - data.Token = b.String() - // TTL is in seconds - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, - "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - t := time.Duration(i) * time.Second - data.TTL = t - }, -} - -var unmarshalHandler = request.NamedHandler{ - Name: unmarshalMetadataHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } - }, -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = 
awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), - r.HTTPResponse.StatusCode, r.RequestID) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), - r.HTTPResponse.StatusCode, r.RequestID) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go deleted file mode 100644 index 663372a9..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ /dev/null @@ -1,92 +0,0 @@ -package ec2metadata - -import ( - "net/http" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A tokenProvider struct provides access to EC2Metadata client -// and atomic instance of a token, along with configuredTTL for it. -// tokenProvider also provides an atomic flag to disable the -// fetch token operation. -// The disabled member will use 0 as false, and 1 as true. -type tokenProvider struct { - client *EC2Metadata - token atomic.Value - configuredTTL time.Duration - disabled uint32 -} - -// A ec2Token struct helps use of token in EC2 Metadata service ops -type ec2Token struct { - token string - credentials.Expiry -} - -// newTokenProvider provides a pointer to a tokenProvider instance -func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { - return &tokenProvider{client: c, configuredTTL: duration} -} - -// fetchTokenHandler fetches token for EC2Metadata service client by default. -func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - - // short-circuits to insecure data flow if tokenProvider is disabled. - if v := atomic.LoadUint32(&t.disabled); v == 1 { - return - } - - if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - return - } - - output, err := t.client.getToken(t.configuredTTL) - - if err != nil { - - // change the disabled flag on token provider to true, - // when error is request timeout error. - if requestFailureError, ok := err.(awserr.RequestFailure); ok { - switch requestFailureError.StatusCode() { - case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: - atomic.StoreUint32(&t.disabled, 1) - case http.StatusBadRequest: - r.Error = requestFailureError - } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } - } - return - } - - newToken := ec2Token{ - token: output.Token, - } - newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) - t.token.Store(newToken) - - // Inject token header to the request. 
- if ec2Token, ok := t.token.Load().(ec2Token); ok { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - } -} - -// enableTokenProviderHandler enables the token provider -func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { - // If the error code status is 401, we enable the token provider - if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && - e.StatusCode() == http.StatusUnauthorized { - atomic.StoreUint32(&t.disabled, 0) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index 343a2106..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,216 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. 
- modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custAddEC2Metadata(p) - custAddS3DualStack(p) - custRegionalS3(p) - custRmIotDataService(p) - custFixAppAutoscalingChina(p) - custFixAppAutoscalingUsGov(p) - } - - return ps, nil -} - -func custAddS3DualStack(p *partition) { - if p.ID != "aws" { - return - } - - custAddDualstack(p, "s3") - custAddDualstack(p, "s3-control") -} - -func custRegionalS3(p *partition) { - if p.ID != "aws" { - return - } - - service, ok := p.Services["s3"] - if !ok { - return - } - - // If global endpoint already exists no customization needed. - if _, ok := service.Endpoints["aws-global"]; ok { - return - } - - service.PartitionEndpoint = "aws-global" - service.Endpoints["us-east-1"] = endpoint{} - service.Endpoints["aws-global"] = endpoint{ - Hostname: "s3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - } - - p.Services["s3"] = service -} - -func custAddDualstack(p *partition, svcName string) { - s, ok := p.Services[svcName] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services[svcName] = s -} - -func custAddEC2Metadata(p *partition) { - p.Services["ec2metadata"] = service{ - IsRegionalized: boxedFalse, - PartitionEndpoint: "aws-global", - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - } -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -func custFixAppAutoscalingChina(p *partition) { - if p.ID != "aws-cn" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - const expectHostname = `autoscaling.{region}.amazonaws.com` - if e, a := s.Defaults.Hostname, expectHostname; e != a { - fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) - return - } - - s.Defaults.Hostname = expectHostname + ".cn" - p.Services[serviceName] = s -} - -func custFixAppAutoscalingUsGov(p *partition) { - if p.ID != "aws-us-gov" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - if a := s.Defaults.CredentialScope.Service; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) - return - } - - if a := s.Defaults.Hostname; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, 
got %s\n", a) - return - } - - s.Defaults.CredentialScope.Service = "application-autoscaling" - s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" - - p.Services[serviceName] = s -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index 9088b3e3..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,6404 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. - AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. - AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. -) - -// AWS Standard partition's regions. -const ( - ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). - CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuNorth1RegionID = "eu-north-1" // EU (Stockholm). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). - MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). -) - -// AWS ISO (US) partition's regions. -const ( - UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. -) - -// AWS ISOB (US) partition's regions. -const ( - UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). -) - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... 
inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, - awsisoPartition, - awsisobPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. -func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "ap-east-1": region{ - Description: "Asia Pacific (Hong Kong)", - }, - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "EU (Frankfurt)", - }, - "eu-north-1": region{ - Description: "EU (Stockholm)", - }, - "eu-west-1": region{ - Description: "EU (Ireland)", - }, - "eu-west-2": region{ - Description: "EU (London)", - }, - "eu-west-3": region{ - Description: "EU (Paris)", - }, - "me-south-1": region{ - Description: "Middle East (Bahrain)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "access-analyzer": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{ - 
Hostname: "api.ecr.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - "ap-northeast-1": endpoint{ - Hostname: "api.ecr.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "api.ecr.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "api.ecr.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "api.ecr.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "api.ecr.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "api.ecr.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "api.ecr.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "api.ecr.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "api.ecr.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "api.ecr.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "api.ecr.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "me-south-1": endpoint{ - Hostname: "api.ecr.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "api.ecr.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "api.ecr.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "api.ecr.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "api.ecr.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "api.ecr.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - 
"us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appmesh": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips": endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "backup": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, - CredentialScope: 
credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloud9": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": 
endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": 
endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-sync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "connect": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "data.mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - 
"eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dataexchange": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datasync": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "discovery": service{ - - Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - 
"us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "docdb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - 
"ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, 
- "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "forecast": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "forecastquery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fsx": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - 
"ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "groundstation": service{ - - Endpoints: endpoints{ - "eu-north-1": endpoint{}, - "me-south-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": 
endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotevents": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ioteventsdata": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "eu-central-1": endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotthingsgraph": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kafka": 
service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lakeformation": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": 
endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconnect": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediapackage": service{ - - Endpoints: 
endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mq": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": 
endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "me-south-1": endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "oidc": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: 
"oidc.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "outposts": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "portal.sso": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - 
Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "us-east-1": endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "projects.iot1click": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": 
endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "robomaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": 
endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "s3": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "aws-global": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: 
"s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "ca-central-1": endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-east-2-fips": endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-west-2-fips": endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "savingsplans": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "savingsplans.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "schemas": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - 
}, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "securityhub": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-3": endpoint{ - Protocols: []string{"https"}, - }, - "me-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - 
"ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "session.qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - 
"eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": 
endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "support.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - 
"ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribestreaming": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transfer": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - 
"us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. -func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "api.ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", - 
IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - 
Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "cn-northwest-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "sns": 
service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "support.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", - }, - }, - Services: services{ - "access-analyzer": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "api.ecr": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - 
"us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "datasync": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "es": service{ - - Endpoints: 
endpoints{ - "fips": endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "route53resolver": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "serverlessrepo": service{ - Defaults: 
endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoPartition returns the Resolver for AWS ISO (US). 
-func AwsIsoPartition() Partition { - return awsisoPartition.Partition() -} - -var awsisoPartition = partition{ - ID: "aws-iso", - Name: "AWS ISO (US)", - DNSSuffix: "c2s.ic.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-iso-east-1": region{ - Description: "US ISO East", - }, - }, - Services: services{ - "api.ecr": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "iam.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-iso-east-1": 
endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - "us-iso-east-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "route53.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-global", - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "support.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
-func AwsIsoBPartition() Partition { - return awsisobPartition.Partition() -} - -var awsisobPartition = partition{ - ID: "aws-iso-b", - Name: "AWS ISOB (US)", - DNSSuffix: "sc2s.sgov.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-isob-east-1": region{ - Description: "US ISOB East (Ohio)", - }, - }, - Services: services{ - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - "us-isob-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", 
"https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-b-global", - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "support.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go deleted file mode 100644 index ca8fc828..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go +++ /dev/null @@ -1,141 +0,0 @@ -package endpoints - -// Service identifiers -// -// Deprecated: Use client package's EndpointsID value instead of these -// ServiceIDs. These IDs are not maintained, and are out of date. -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. 
- ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. 
- S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SagemakerServiceID = "api.sagemaker" // Sagemaker. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TransferServiceID = "transfer" // Transfer. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go deleted file mode 100644 index 84316b92..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns a endpoint Resolver will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// . -// -// Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition. -// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic how -// endpoints are resolved. This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointsResolver field of aws.Config when initializing -// the session, or service client. 
-// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.EndpointFor, converting it to a type that satisfies the -// Resolver interface. -// -// -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) -// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go deleted file mode 100644 index ca956e5f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ /dev/null @@ -1,564 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Options provide the configuration needed to direct how the -// endpoints will be resolved. -type Options struct { - // DisableSSL forces the endpoint to be resolved as HTTP. - // instead of HTTPS if the service supports it. - DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - UseDualStack bool - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. - StrictMatching bool - - // Enables resolving a service endpoint based on the region provided if the - // service does not exist. The service endpoint ID will be used as the service - // domain name prefix. By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unknown and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving and endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. - ResolveUnknownService bool - - // STS Regional Endpoint flag helps with resolving the STS endpoint - STSRegionalEndpoint STSRegionalEndpoint - - // S3 Regional Endpoint flag helps with resolving the S3 endpoint - S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint -} - -// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint -// options. 
-type STSRegionalEndpoint int - -func (e STSRegionalEndpoint) String() string { - switch e { - case LegacySTSEndpoint: - return "legacy" - case RegionalSTSEndpoint: - return "regional" - case UnsetSTSEndpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. - UnsetSTSEndpoint STSRegionalEndpoint = iota - - // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified - // to use legacy endpoints. - LegacySTSEndpoint - - // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified - // to use regional endpoints. - RegionalSTSEndpoint -) - -// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the STS regional Endpoint flag. -func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacySTSEndpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalSTSEndpoint, nil - default: - return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) - } -} - -// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 -// Regional Endpoint options. -type S3UsEast1RegionalEndpoint int - -func (e S3UsEast1RegionalEndpoint) String() string { - switch e { - case LegacyS3UsEast1Endpoint: - return "legacy" - case RegionalS3UsEast1Endpoint: - return "regional" - case UnsetS3UsEast1Endpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not - // specified. - UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota - - // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use legacy endpoints. - LegacyS3UsEast1Endpoint - - // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use regional endpoints. - RegionalS3UsEast1Endpoint -) - -// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the S3 regional Endpoint flag. -func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacyS3UsEast1Endpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalS3UsEast1Endpoint, nil - default: - return UnsetS3UsEast1Endpoint, - fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) - } -} - -// Set combines all of the option functions together. -func (o *Options) Set(optFns ...func(*Options)) { - for _, fn := range optFns { - fn(o) - } -} - -// DisableSSLOption sets the DisableSSL options. Can be used as a functional -// option when resolving endpoints. -func DisableSSLOption(o *Options) { - o.DisableSSL = true -} - -// UseDualStackOption sets the UseDualStack option. Can be used as a functional -// option when resolving endpoints. -func UseDualStackOption(o *Options) { - o.UseDualStack = true -} - -// StrictMatchingOption sets the StrictMatching option. Can be used as a functional -// option when resolving endpoints. 
-func StrictMatchingOption(o *Options) { - o.StrictMatching = true -} - -// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used -// as a functional option when resolving endpoints. -func ResolveUnknownServiceOption(o *Options) { - o.ResolveUnknownService = true -} - -// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve -// STS endpoint to their regional endpoint, instead of the global endpoint. -func STSRegionalEndpointOption(o *Options) { - o.STSRegionalEndpoint = RegionalSTSEndpoint -} - -// A Resolver provides the interface for functionality to resolve endpoints. -// The build in Partition and DefaultResolver return value satisfy this interface. -type Resolver interface { - EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) -} - -// ResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) - -// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. -func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return fn(service, region, opts...) -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. -// -// If disableSSL is set, it will only set the URL's scheme if the URL does not -// contain a scheme. -func AddScheme(endpoint string, disableSSL bool) string { - if !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} - -// EnumPartitions a provides a way to retrieve the underlying partitions that -// make up the SDK's default Resolver, or any resolver decoded from a model -// file. -// -// Use this interface with DefaultResolver and DecodeModels to get the list of -// Partitions. -type EnumPartitions interface { - Partitions() []Partition -} - -// RegionsForService returns a map of regions for the partition and service. -// If either the partition or service does not exist false will be returned -// as the second parameter. -// -// This example shows how to get the regions for DynamoDB in the AWS partition. -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) -// -// This is equivalent to using the partition directly. -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() -func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { - for _, p := range ps { - if p.ID() != partitionID { - continue - } - if _, ok := p.p.Services[serviceID]; !ok { - break - } - - s := Service{ - id: serviceID, - p: p.p, - } - return s.Regions(), true - } - - return map[string]Region{}, false -} - -// PartitionForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. 
-func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id, dnsSuffix string - p *partition -} - -// DNSSuffix returns the base domain name of the partition. -func (p Partition) DNSSuffix() string { return p.dnsSuffix } - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// EndpointFor attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the UnknownServiceError -// error will be returned. This validation will occur regardless if -// StrictMatching is enabled. To enable resolving unknown services set the -// "ResolveUnknownService" option to true. When StrictMatching is disabled -// this option allows the partition resolver to resolve a endpoint based on -// the service endpoint ID provided. -// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned may look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := make(map[string]Region, len(p.p.Regions)) - for id, r := range p.p.Regions { - rs[id] = Region{ - id: id, - desc: r.Description, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. -func (p Partition) Services() map[string]Service { - ss := make(map[string]Service, len(p.p.Services)) - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Description returns the region's description. The region description -// is free text, it can be empty, and it may change between SDK releases. -func (r Region) Description() string { return r.desc } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. 
-func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - for id := range s.p.Services[s.id].Endpoints { - if r, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, - desc: r.Description, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Endpoints() map[string]Endpoint { - es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) - for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// A Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. -func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// ResolveEndpoint resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. -func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts...) -} - -// A ResolvedEndpoint is an endpoint that has been resolved based on a partition -// service, and region. -type ResolvedEndpoint struct { - // The endpoint URL - URL string - - // The endpoint partition - PartitionID string - - // The region that should be used for signing requests. - SigningRegion string - - // The service name that should be used for signing requests. - SigningName string - - // States that the signing name for this endpoint was derived from metadata - // passed in, but was not explicitly modeled. - SigningNameDerived bool - - // The signing method that should be used for signing requests. - SigningMethod string -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. 
-type awsError awserr.Error - -// A EndpointNotFoundError is returned when in StrictMatching mode, and the -// endpoint for the service and region cannot be found in any of the partitions. -type EndpointNotFoundError struct { - awsError - Partition string - Service string - Region string -} - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. -type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. -type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. 
-func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go deleted file mode 100644 index df75e899..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go +++ /dev/null @@ -1,24 +0,0 @@ -package endpoints - -var legacyGlobalRegions = map[string]map[string]struct{}{ - "sts": { - "ap-northeast-1": {}, - "ap-south-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ca-central-1": {}, - "eu-central-1": {}, - "eu-north-1": {}, - "eu-west-1": {}, - "eu-west-2": {}, - "eu-west-3": {}, - "sa-east-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-1": {}, - "us-west-2": {}, - }, - "s3": { - "us-east-1": {}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go deleted file mode 100644 index eb2ac83c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ /dev/null @@ -1,341 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) - } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. -func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - dnsSuffix: p.DNSSuffix, - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { - s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] - - if hasEndpoint && hasService { - return true - } - - if strictMatch { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func allowLegacyEmptyRegion(service string) bool { - legacy := map[string]struct{}{ - "budgets": {}, - "ce": {}, - "chime": {}, - "cloudfront": {}, - "ec2metadata": {}, - "iam": {}, - "importexport": {}, - "organizations": {}, - "route53": {}, - "sts": {}, - "support": {}, - "waf": {}, - } - - _, allowed := legacy[service] - return allowed -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) 
- - s, hasService := p.Services[service] - if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { - region = s.PartitionEndpoint - } - - if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || - (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { - if _, ok := legacyGlobalRegions[service][region]; ok { - region = "aws-global" - } - } - - e, hasEndpoint := s.endpointForRegion(region) - if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) - } - - defs := []endpoint{p.Defaults, s.Defaults} - - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es endpoints) []string { - list := make([]string, 0, len(es)) - for k := range es { - list = append(list, k) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string) (endpoint, bool) { - if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint - } - - if e, ok := s.Endpoints[region]; ok { - return e, true - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return endpoint{}, false -} - -type endpoints map[string]endpoint - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. 
- SSLCommonName string `json:"sslCommonName"` -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - hostname := e.Hostname - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - region = signingRegion - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - return ResolvedEndpoint{ - URL: u, - PartitionID: partitionID, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack - } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 0fdfcc56..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,351 +0,0 @@ -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model 
definition. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions - - // Disables code generation of the service endpoint prefix IDs defined in - // the model. - DisableGenerateServiceIDs bool -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) - - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - v := struct { - Resolver - CodeGenOptions - }{ - Resolver: resolver, - CodeGenOptions: opts, - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by 
aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" $.Resolver }} - - {{ range $_, $partition := $.Resolver }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ if not $.DisableGenerateServiceIDs -}} - {{ template "service consts" $.Resolver }} - {{- end }} - - {{ template "endpoint resolvers" $.Resolver }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . -}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . -}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . 
-}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -endpoints{ - {{ range $id, $endpoint := . -}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index fa06f7a8..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go deleted file mode 100644 index 91a6f277..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go +++ /dev/null @@ -1,12 +0,0 @@ -package aws - -// JSONValue is a representation of a grab bag type that will be marshaled -// into a json string. This type can be used just like any other map. -// -// Example: -// -// values := aws.JSONValue{ -// "Foo": "Bar", -// } -// values["Baz"] = "Qux" -type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index 6ed15b2e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,118 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. -func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. 
-func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nil, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors - - // LogDebugWithEventStreamBody states the SDK should log EventStream - // request and response bodys. This should be used to log the EventStream - // wire unmarshaled message content of requests and responses made while - // using the SDK Will also enable LogDebug. - LogDebugWithEventStreamBody -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. 
-func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go deleted file mode 100644 index d9b37f4d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ /dev/null @@ -1,18 +0,0 @@ -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - if strings.Contains(err.Error(), "read: connection reset") { - return false - } - - if strings.Contains(err.Error(), "connection reset") || - strings.Contains(err.Error(), "broken pipe") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index e819ab6c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,343 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - BuildStream HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalStream HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList - CompleteAttempt HandlerList - Complete HandlerList -} - -// Copy returns a copy of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - BuildStream: h.BuildStream.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalStream: h.UnmarshalStream.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - CompleteAttempt: h.CompleteAttempt.copy(), - Complete: h.Complete.copy(), - } -} - -// Clear removes callback functions for all handlers. -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.BuildStream.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalStream.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() - h.CompleteAttempt.Clear() - h.Complete.Clear() -} - -// IsEmpty returns if there are no handlers in any of the handlerlists. -func (h *Handlers) IsEmpty() bool { - if h.Validate.Len() != 0 { - return false - } - if h.Build.Len() != 0 { - return false - } - if h.BuildStream.Len() != 0 { - return false - } - if h.Send.Len() != 0 { - return false - } - if h.Sign.Len() != 0 { - return false - } - if h.Unmarshal.Len() != 0 { - return false - } - if h.UnmarshalStream.Len() != 0 { - return false - } - if h.UnmarshalMeta.Len() != 0 { - return false - } - if h.UnmarshalError.Len() != 0 { - return false - } - if h.ValidateResponse.Len() != 0 { - return false - } - if h.Retry.Len() != 0 { - return false - } - if h.AfterRetry.Len() != 0 { - return false - } - if h.CompleteAttempt.Len() != 0 { - return false - } - if h.Complete.Len() != 0 { - return false - } - - return true -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. 
-type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - if len(l.list) == 0 { - return n - } - - n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = l.list[0:0] -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.PushBackNamed(NamedHandler{"__anonymous", f}) -} - -// PushBackNamed pushes named handler f to the back of the handler list. -func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. - l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. -func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// Swap will swap out all handlers matching the name passed in. The matched -// handlers will be swapped in. True is returned if the handlers were swapped. 
-func (l *HandlerList) Swap(name string, replace NamedHandler) bool { - var swapped bool - - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == name { - l.list[i] = replace - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. -func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. -func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} - -// WithSetRequestHeaders updates the operation request's HTTP header to contain -// the header key value pairs provided. If the header key already exists in the -// request's HTTP header set, the existing value(s) will be replaced. 
-func WithSetRequestHeaders(h map[string]string) Option { - return withRequestHeader(h).SetRequestHeaders -} - -type withRequestHeader map[string]string - -func (h withRequestHeader) SetRequestHeaders(r *Request) { - for k, v := range h { - r.HTTPRequest.Header[k] = []string{v} - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index 79f79602..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index 9370fa50..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,65 +0,0 @@ -package request - -import ( - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { - reader := &offsetReader{} - _, err := buf.Seek(offset, sdkio.SeekStart) - if err != nil { - return nil, err - } - - reader.buf = buf - return reader, nil -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. -func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { - if err := o.Close(); err != nil { - return nil, err - } - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index 5050db4e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,694 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. 
- ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. - ErrCodeResponseTimeout = "ResponseTimeout" - - // ErrCodeInvalidPresignExpire is returned when the expire time provided to - // presign is invalid - ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" - - // CanceledErrorCode is the error code that will be returned by an - // API request that was canceled. Requests given a aws.Context may - // return this error when canceled. - CanceledErrorCode = "RequestCanceled" - - // ErrCodeRequestError is an error preventing the SDK from continuing to - // process the request. - ErrCodeRequestError = "RequestError" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - AttemptTime time.Time - Time time.Time - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - streamingBody io.ReadCloser - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - // Additional API error codes that should be retried. IsErrorRetryable - // will consider these codes in addition to its built in cases. - RetryErrorCodes []string - - // Additional API error codes that should be retried with throttle backoff - // delay. IsErrorThrottle will consider these codes in addition to its - // built in cases. - ThrottleErrorCodes []string - - // A value greater than 0 instructs the request to be signed as Presigned URL - // You should not set this field directly. Instead use Request's - // Presign or PresignRequest methods. - ExpireTime time.Duration - - context aws.Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API operation and -// parameters. -// -// A Retryer should be provided to direct how the request is retried. If -// Retryer is nil, a default no retry value will be used. You can use -// NoOpRetryer in the Client package to disable retry behavior directly. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. 
-func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - if retryer == nil { - retryer = noOpRetryer{} - } - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// A Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. -// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. -// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) -func WithLogLevel(l aws.LogLevelType) Option { - return func(r *Request) { - r.Config.LogLevel = aws.LogLevel(l) - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// the were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context will always returns a non-nil context. If Request does not have a -// context aws.BackgroundContext will be returned. -func (r *Request) Context() aws.Context { - if r.context != nil { - return r.context - } - return aws.BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// a in-flight request. The Context value must not be nil, or this method will -// panic. -// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use use a single Request value for multiple -// requests. A new Request should be created for each API operation request. 
-// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx aws.Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { - return false - } - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -func fmtAttemptCount(retryCount, maxRetries int) string { - return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - - if aws.IsReaderSeekable(reader) { - var err error - // Get the Bodies current offset so retries will start from the same - // initial position. - r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to determine start of request body", err) - return - } - } - r.ResetBody() -} - -// SetStreamingBody set the reader to be used for the request that will stream -// bytes to the server. Request's Body must not be set to any reader. -func (r *Request) SetStreamingBody(reader io.ReadCloser) { - r.streamingBody = reader - r.SetReaderBody(aws.ReadSeekCloser(reader)) -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. The expire parameter is only used for presigned Amazon -// S3 API requests. All other AWS services will use a fixed expiration -// time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -func (r *Request) Presign(expire time.Duration) (string, error) { - r = r.copy() - - // Presign requires all headers be hoisted. There is no way to retrieve - // the signed headers not hoisted without this. Making the presigned URL - // useless. - r.NotHoist = false - - u, _, err := getPresignedURL(r, expire) - return u, err -} - -// PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. 
The expire parameter is only used for -// presigned Amazon S3 API requests. All other AWS services will use a fixed -// expiration time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -// -// Returns the URL string for the API operation with signature in the query string, -// and the HTTP headers that were included in the signature. These headers must -// be included in any HTTP request made with the presigned URL. -// -// To prevent hoisting any headers to the query string set NotHoist to true on -// this Request value prior to calling PresignRequest. -func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { - r = r.copy() - return getPresignedURL(r, expire) -} - -// IsPresigned returns true if the request represents a presigned API url. -func (r *Request) IsPresigned() bool { - return r.ExpireTime != 0 -} - -func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { - if expire <= 0 { - return "", nil, awserr.New( - ErrCodeInvalidPresignExpire, - "presigned URL requires an expire duration greater than 0", - nil, - ) - } - - r.ExpireTime = expire - - if r.Operation.BeforePresignFn != nil { - if err := r.Operation.BeforePresignFn(r); err != nil { - return "", nil, err - } - } - - if err := r.Sign(); err != nil { - return "", nil, err - } - - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -const ( - notRetrying = "not retrying" -) - -func debugLogReqError(r *Request, stage, retryStr string, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Any additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", notRetrying, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request, returning error if errors are encountered. -// -// Sign will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. 
-func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - - SanitizeHostForHeader(r.HTTPRequest) - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { - if r.streamingBody != nil { - return r.streamingBody, nil - } - - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to get next request body reader", err) - } - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := aws.SeekerLen(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to compute request body size", err) - } - - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker, or did not implement - // Len() method. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper. -func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request, returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Regardless of success or failure of the request trigger the Complete - // request handlers. 
- r.Handlers.Complete.Run(r) - }() - - if err := r.Error; err != nil { - return err - } - - for { - r.Error = nil - r.AttemptTime = time.Now() - - if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", notRetrying, err) - return err - } - - if err := r.sendRequest(); err == nil { - return nil - } - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } - - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - } -} - -func (r *Request) prepareRetry() error { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - if err := r.Error; err != nil { - return awserr.New(ErrCodeSerialization, - "failed to prepare body for retry", err) - - } - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - - return nil -} - -func (r *Request) sendRequest() (sendErr error) { - defer r.Handlers.CompleteAttempt.Run(r) - - r.Retryable = nil - r.Handlers.Send.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. -func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. 
-// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go deleted file mode 100644 index e36e468b..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.8 - -package request - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// and body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go deleted file mode 100644 index de1292f4..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build go1.8 - -package request - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// NoBody is a http.NoBody reader instructing Go HTTP client to not include -// and body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. 
When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -// -// Will also set the Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to reset request body", err) - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go deleted file mode 100644 index a7365cd1..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go deleted file mode 100644 index 307fa070..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 64784e16..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,266 +0,0 @@ -package request - -import ( - "reflect" - "sync/atomic" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Pagination provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagination differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// for p.Next() { -// data := p.Page().(*s3.ListObjectsOutput) -// // process the page's data -// // ... -// // break out of loop to stop fetching additional pages -// } -// -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pagination type. -type Pagination struct { - // Function to return a Request value for each pagination request. 
- // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. - NewRequest func() (*Request, error) - // EndPageOnSameToken, when enabled, will allow the paginator to stop on - // token that are the same as its previous tokens. - EndPageOnSameToken bool - - started bool - prevTokens []interface{} - nextTokens []interface{} - - err error - curPage interface{} -} - -// HasNextPage will return true if Pagination is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. -func (p *Pagination) HasNextPage() bool { - if !p.started { - return true - } - - hasNextPage := len(p.nextTokens) != 0 - if p.EndPageOnSameToken { - return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) - } - return hasNextPage -} - -// Err returns the error Pagination encountered when retrieving the next page. -func (p *Pagination) Err() error { - return p.err -} - -// Page returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pagination) Page() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. -func (p *Pagination) Next() bool { - if !p.HasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.prevTokens = p.nextTokens - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pagination type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. 
-func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if !v { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - vs, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(vs) == 0 { - tokens = append(tokens, nil) - continue - } - v := vs[0] - - switch tv := v.(type) { - case *string: - if len(aws.StringValue(tv)) == 0 { - tokens = append(tokens, nil) - continue - } - case string: - if len(tv) == 0 { - tokens = append(tokens, nil) - continue - } - } - - tokenAdded = true - tokens = append(tokens, v) - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. -func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) - -// HasNextPage returns true if this request has more pages of data available. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) HasNextPage() bool { - logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, - "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") - - return len(r.nextPageTokens()) > 0 -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) NextPage() *Request { - logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, - "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") - - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, - "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") - - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 752ae47f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,309 +0,0 @@ -package request - -import ( - "net" - "net/url" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer provides the interface drive the SDK's request retry behavior. The -// Retryer implementation is responsible for implementing exponential backoff, -// and determine if a request API error should be retried. -// -// client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. -type Retryer interface { - // RetryRules return the retry delay that should be used by the SDK before - // making another request attempt for the failed request. - RetryRules(*Request) time.Duration - - // ShouldRetry returns if the failed request is retryable. - // - // Implementations may consider request attempt count when determining if a - // request is retryable, but the SDK will use MaxRetries to limit the - // number of attempts a request are made. - ShouldRetry(*Request) bool - - // MaxRetries is the number of times a request may be retried before - // failing. - MaxRetries() int -} - -// WithRetryer sets a Retryer value to the given Config returning the Config -// value for chaining. The value must not be nil. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - if retryer == nil { - if cfg.Logger != nil { - cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") - } - retryer = noOpRetryer{} - } - cfg.Retryer = retryer - return cfg - -} - -// noOpRetryer is a internal no op retryer used when a request is created -// without a retryer. -// -// Provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type noOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d noOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d noOpRetryer) ShouldRetry(_ *Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d noOpRetryer) RetryRules(_ *Request) time.Duration { - return 0 -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. 
-var retryableCodes = map[string]struct{}{ - ErrCodeRequestError: {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "RequestThrottledException": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 - "TransactionInProgressException": {}, - "EC2ThrottledException": {}, // EC2 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: {}, - ErrCodeRead: {}, -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporary); ok { - return t.Temporary() || isErrConnectionReset(err) - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. -func IsErrorRetryable(err error) bool { - if err == nil { - return false - } - return shouldRetryError(err) -} - -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - if isNestedErrorRetryable(err) { - return true - } - - origErr := err.OrigErr() - var shouldRetry bool - if origErr != nil { - shouldRetry = shouldRetryError(origErr) - if err.Code() == ErrCodeRequestError && !shouldRetry { - return false - } - } - if isCodeRetryable(err.Code()) { - return true - } - return shouldRetry - - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. - return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. 
- return true - - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeThrottle(aerr.Code()) - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry -// error. Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeExpiredCreds(aerr.Code()) - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - if isErrCode(r.Error, r.RetryErrorCodes) { - return true - } - - // HTTP response status code 501 should not be retried. - // 501 represents Not Implemented which means the request method is not - // supported by the server and cannot be handled. - if r.HTTPResponse != nil { - // HTTP response status code 500 represents internal server error and - // should be retried without any throttle. - if r.HTTPResponse.StatusCode == 500 { - return true - } - } - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its -// code. Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - if isErrCode(r.Error, r.ThrottleErrorCodes) { - return true - } - - if r.HTTPResponse != nil { - switch r.HTTPResponse.StatusCode { - case - 429, // error caused due to too many requests - 502, // Bad Gateway error should be throttled - 503, // caused when service is unavailable - 504: // error occurred due to gateway timeout - return true - } - } - - return IsErrorThrottle(r.Error) -} - -func isErrCode(err error, codes []string) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - for _, code := range codes { - if code == aerr.Code() { - return true - } - } - } - - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go deleted file mode 100644 index 09a44eb9..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package request - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. 
-type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// to a ErrCodeResponseTimeout, if its child is that. -func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurred, we will return the -// ErrCodeResponseTimeout. -// -// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations. - r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 8630683f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,286 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" - // ParamMaxLenErrCode is the error code for value being too long. - ParamMaxLenErrCode = "ParamMaxLenError" - - // ParamFormatErrCode is the error code for a field with invalid - // format or characters. - ParamFormatErrCode = "ParamFormatInvalidError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. 
-type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. -func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." 
- } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} - -// An ErrParamMaxLen represents a maximum length parameter error. -type ErrParamMaxLen struct { - errInvalidParam - max int -} - -// NewErrParamMaxLen creates a new maximum length parameter error. -func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { - return &ErrParamMaxLen{ - errInvalidParam: errInvalidParam{ - code: ParamMaxLenErrCode, - field: field, - msg: fmt.Sprintf("maximum size of %v, %v", max, value), - }, - max: max, - } -} - -// MaxLen returns the field's required minimum length. -func (e *ErrParamMaxLen) MaxLen() int { - return e.max -} - -// An ErrParamFormat represents a invalid format parameter error. -type ErrParamFormat struct { - errInvalidParam - format string -} - -// NewErrParamFormat creates a new invalid format parameter error. -func NewErrParamFormat(field string, format, value string) *ErrParamFormat { - return &ErrParamFormat{ - errInvalidParam: errInvalidParam{ - code: ParamFormatErrCode, - field: field, - msg: fmt.Sprintf("format %v, %v", format, value), - }, - format: format, - } -} - -// Format returns the field's required format. 
-func (e *ErrParamFormat) Format() string { - return e.format -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go deleted file mode 100644 index 4601f883..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ /dev/null @@ -1,295 +0,0 @@ -package request - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns the maximum number of times the waiter should -// attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -// -// Attempt is the number of attempts the Waiter has made checking the resource -// state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. -func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. -func WithWaiterLogger(logger aws.Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. -type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger aws.Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) - SleepWithContext func(aws.Context, time.Duration) error -} - -// ApplyOptions updates the waiter with the list of waiter options provided. -func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. 
-func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. -// Use aws.BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors, -// or the max attempts expires. -// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts expires. -func (w Waiter) WaitWithContext(ctx aws.Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times - // This is here instead of in the for loop above to prevent delaying - // unnecessary when the waiter will not retry. 
- if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - delay := w.Delay(attempt) - if sleepFn := req.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(delay) - } else { - sleepCtxFn := w.SleepWithContext - if sleepCtxFn == nil { - sleepCtxFn = aws.SleepWithContext - } - - if err := sleepCtxFn(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) - } - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go deleted file mode 100644 index ea9ebb6f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build go1.7 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. 
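// Editor's note: an illustrative sketch, not part of the vendored waiter.go
// removed in this diff. It mirrors the shape of the SDK's generated WaitUntil*
// helpers, which assemble a Waiter with acceptors like those matched above. The
// waiter name, JMESPath argument, expected values, and the newRequest parameter
// are hypothetical.
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
)

// waitUntilResourceReady polls an operation until an acceptor reports success
// or failure, or the attempts are exhausted. newRequest stands in for a service
// operation's request constructor.
func waitUntilResourceReady(ctx aws.Context, newRequest func() (*request.Request, error)) error {
	w := request.Waiter{
		Name:        "WaitUntilResourceReady",
		MaxAttempts: 40,
		Delay:       request.ConstantWaiterDelay(15 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				State:    request.SuccessWaiterState,
				Matcher:  request.PathAllWaiterMatch,
				Argument: "Resources[].Status",
				Expected: "READY",
			},
			{
				State:    request.FailureWaiterState,
				Matcher:  request.ErrorWaiterMatch,
				Expected: "ResourceNotFound",
			},
		},
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			req, err := newRequest()
			if err != nil {
				return nil, err
			}
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	return w.WaitWithContext(ctx)
}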
-func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go deleted file mode 100644 index fec39dfc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.6,go1.5 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go deleted file mode 100644 index 1c5a5391..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !go1.7,go1.6 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go deleted file mode 100644 index cc64e24f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ /dev/null @@ -1,259 +0,0 @@ -package session - -import ( - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -func resolveCredentials(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (*credentials.Credentials, error) { - - switch { - case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration - // so load that profile from shared config first. - // Github(aws/aws-sdk-go#2727) - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - - case envCfg.Creds.HasKeys(): - // Environment credentials - return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil - - case len(envCfg.WebIdentityTokenFilePath) != 0: - // Web identity token from environment, RoleARN required to also be - // set. - return assumeWebIdentity(cfg, handlers, - envCfg.WebIdentityTokenFilePath, - envCfg.RoleARN, - envCfg.RoleSessionName, - ) - - default: - // Fallback to the "default" credential resolution chain. 
- return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - } -} - -// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_ROLE_ARN' was not set. -var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) - -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but -// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. -var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) - -func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, - filepath string, - roleARN, sessionName string, -) (*credentials.Credentials, error) { - - if len(filepath) == 0 { - return nil, WebIdentityEmptyTokenFilePathErr - } - - if len(roleARN) == 0 { - return nil, WebIdentityEmptyRoleARNErr - } - - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) - - return creds, nil -} - -func resolveCredsFromProfile(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch { - case sharedCfg.SourceProfile != nil: - // Assume IAM role with credentials source from a different profile. - creds, err = resolveCredsFromProfile(cfg, envCfg, - *sharedCfg.SourceProfile, handlers, sessOpts, - ) - - case sharedCfg.Creds.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - creds = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - - case len(sharedCfg.CredentialProcess) != 0: - // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) - - case len(sharedCfg.CredentialSource) != 0: - creds, err = resolveCredsFromSource(cfg, envCfg, - sharedCfg, handlers, sessOpts, - ) - - case len(sharedCfg.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. - return assumeWebIdentity(cfg, handlers, - sharedCfg.WebIdentityTokenFile, - sharedCfg.RoleARN, - sharedCfg.RoleSessionName, - ) - - default: - // Fallback to default credentials provider, include mock errors for - // the credential chain so user can identify why credentials failed to - // be retrieved. 
- creds = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{ - Err: awserr.New("EnvAccessKeyNotFound", - "failed to find credentials in the environment.", nil), - }, - &credProviderError{ - Err: awserr.New("SharedCredsLoad", - fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), - }, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) - } - if err != nil { - return nil, err - } - - if len(sharedCfg.RoleARN) > 0 { - cfgCp := *cfg - cfgCp.Credentials = creds - return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) - } - - return creds, nil -} - -// valid credential source values -const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" -) - -func resolveCredsFromSource(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - case credSourceEnvironment: - creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) - - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return nil, ErrSharedConfigECSContainerEnvVarEmpty - } - - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - default: - return nil, ErrSharedConfigInvalidCredSource - } - - return creds, nil -} - -func credsFromAssumeRole(cfg aws.Config, - handlers request.Handlers, - sharedCfg sharedConfig, - sessOpts Options, -) (*credentials.Credentials, error) { - - if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return nil, AssumeRoleTokenProviderNotSetError{} - } - - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.RoleSessionName - opt.Duration = sessOpts.AssumeRoleDuration - - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ), nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. 
-func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go deleted file mode 100644 index 7ec66e7e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Package session provides configuration for the SDK's service clients. Sessions -can be shared across service clients that share the same base configuration. - -Sessions are safe to use concurrently as long as the Session is not being -modified. Sessions should be cached when possible, because creating a new -Session will load all configuration values from the environment, and config -files each time the Session is created. Sharing the Session value across all of -your service clients will ensure the configuration is loaded the fewest number -of times possible. - -Sessions options from Shared Config - -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. Using the NewSessionWithOptions with -SharedConfigState set to SharedConfigEnable will create the session as if the -AWS_SDK_LOAD_CONFIG environment variable was set. - -Credential and config loading order - -The Session will attempt to load configuration and credentials from the -environment, configuration files, and other credential sources. The order -configuration is loaded in is: - - * Environment Variables - * Shared Credentials file - * Shared Configuration file (if SharedConfig is enabled) - * EC2 Instance Metadata (credentials only) - -The Environment variables for credentials will have precedence over shared -config even if SharedConfig is enabled. To override this behavior, and use -shared config credentials instead specify the session.Options.Profile, (e.g. -when using credential_source=Environment to assume a role). - - sess, err := session.NewSessionWithOptions(session.Options{ - Profile: "myProfile", - }) - -Creating Sessions - -Creating a Session without additional options will load credentials region, and -profile loaded from the environment and shared config automatically. See, -"Environment Variables" section for information on environment variables used -by Session. - - // Create Session - sess, err := session.NewSession() - - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded, config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. - - // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("us-west-2"), - }) - -Use NewSessionWithOptions to provide additional configuration driving how the -Session's configuration will be loaded. Such as, specifying shared config -profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). 
- - // Equivalent to session.NewSession() - sess, err := session.NewSessionWithOptions(session.Options{ - // Options - }) - - sess, err := session.NewSessionWithOptions(session.Options{ - // Specify profile to load for the session's config - Profile: "profile_name", - - // Provide SDK Config options, such as Region. - Config: aws.Config{ - Region: aws.String("us-west-2"), - }, - - // Force enable Shared Config support - SharedConfigState: session.SharedConfigEnable, - }) - -Adding Handlers - -You can add handlers to a session to decorate API operation, (e.g. adding HTTP -headers). All clients that use the Session receive a copy of the Session's -handlers. For example, the following request handler added to the Session logs -every requests made. - - // Create a session, and add additional handlers for all service - // clients created with the Session to inherit. Adds logging handler. - sess := session.Must(session.NewSession()) - - sess.Handlers.Send.PushFront(func(r *request.Request) { - // Log every request made and its payload - logger.Printf("Request: %s/%s, Params: %s", - r.ClientInfo.ServiceName, r.Operation, r.Params) - }) - -Shared Config Fields - -By default the SDK will only load the shared credentials file's -(~/.aws/credentials) credentials values, and all other config is provided by -the environment variables, SDK defaults, and user provided aws.Config values. - -If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable -option is used to create the Session the full shared config values will be -loaded. This includes credentials, region, and support for assume role. In -addition the Session will load its configuration from both the shared config -file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both -files have the same format. - -If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file -(~/.aws/config). - -Credentials are the values the SDK uses to authenticating requests with AWS -Services. When specified in a file, both aws_access_key_id and -aws_secret_access_key must be provided together in the same file to be -considered valid. They will be ignored if both are not present. -aws_session_token is an optional field that can be provided in addition to the -other two fields. - - aws_access_key_id = AKID - aws_secret_access_key = SECRET - aws_session_token = TOKEN - - ; region only supported if SharedConfigEnabled. - region = us-east-1 - -Assume Role configuration - -The role_arn field allows you to configure the SDK to assume an IAM role using -a set of credentials from another source. Such as when paired with static -credentials, "profile_source", "credential_process", or "credential_source" -fields. If "role_arn" is provided, a source of credentials must also be -specified, such as "source_profile", "credential_source", or -"credential_process". - - role_arn = arn:aws:iam:::role/ - source_profile = profile_with_creds - external_id = 1234 - mfa_serial = - role_session_name = session_name - - -The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you -must also set the Session Option.AssumeRoleTokenProvider. The Session will fail -to load if the AssumeRoleTokenProvider is not specified. 
- - sess := session.Must(session.NewSessionWithOptions(session.Options{ - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - })) - -To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider -documentation. - -Environment Variables - -When a Session is created several environment variables can be set to adjust -how the SDK functions, and what configuration data it loads when creating -Sessions. All environment values are optional, but some values like credentials -require multiple of the values to set or the partial values will be ignored. -All environment variable values are strings unless otherwise noted. - -Environment configuration values. If set both Access Key ID and Secret Access -Key must be provided. Session Token and optionally also be provided, but is -not required. - - # Access Key ID - AWS_ACCESS_KEY_ID=AKID - AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - - # Secret Access Key - AWS_SECRET_ACCESS_KEY=SECRET - AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - - # Session Token - AWS_SESSION_TOKEN=TOKEN - -Region value will instruct the SDK where to make service API requests to. If is -not provided in the environment the region must be provided before a service -client request is made. - - AWS_REGION=us-east-1 - - # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_REGION is not also set. - AWS_DEFAULT_REGION=us-east-1 - -Profile name the SDK should load use when loading shared config from the -configuration files. If not provided "default" will be used as the profile name. - - AWS_PROFILE=my_profile - - # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_PROFILE is not also set. - AWS_DEFAULT_PROFILE=my_profile - -SDK load config instructs the SDK to load the shared config in addition to -shared credentials. This also expands the configuration loaded so the shared -credentials will have parity with the shared config file. This also enables -Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE -env values as well. - - AWS_SDK_LOAD_CONFIG=1 - -Shared credentials file path can be set to instruct the SDK to use an alternative -file for the shared credentials. If not set the file will be loaded from -$HOME/.aws/credentials on Linux/Unix based systems, and -%USERPROFILE%\.aws\credentials on Windows. - - AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - -Shared config file path can be set to instruct the SDK to use an alternative -file for the shared config. If not set the file will be loaded from -$HOME/.aws/config on Linux/Unix based systems, and -%USERPROFILE%\.aws\config on Windows. - - AWS_CONFIG_FILE=$HOME/my_shared_config - -Path to a custom Credentials Authority (CA) bundle PEM file that the SDK -will use instead of the default system's root CA bundle. Use this only -if you want to replace the CA bundle the SDK uses for TLS requests. - - AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - -Enabling this option will attempt to merge the Transport into the SDK's HTTP -client. If the client's Transport is not a http.Transport an error will be -returned. If the Transport's TLS config is set this option will cause the SDK -to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file -contains multiple certificates all of them will be loaded. - -The Session option CustomCABundle is also available when creating sessions -to also enable this feature. 
CustomCABundle session option field has priority -over the AWS_CA_BUNDLE environment variable, and will be used if both are set. - -Setting a custom HTTPClient in the aws.Config options will override this setting. -To use this option and custom HTTP client, the HTTP client needs to be provided -when creating the session. Not the service client. -*/ -package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go deleted file mode 100644 index c1e0e9c9..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ /dev/null @@ -1,345 +0,0 @@ -package session - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// EnvProviderName provides a name of the provider when config is loaded from environment. -const EnvProviderName = "EnvConfigCredentials" - -// envConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type envConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Creds credentials.Value - - // Region value will instruct the SDK where to make service API requests to. If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-east-1 - // - // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_REGION is not also set. - // AWS_DEFAULT_REGION=us-east-1 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // - // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_PROFILE is not also set. - // AWS_DEFAULT_PROFILE=my_profile - Profile string - - // SDK load config instructs the SDK to load the shared config in addition to - // shared credentials. This also expands the configuration loaded from the shared - // credentials to have parity with the shared config file. This also enables - // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE - // env values as well. - // - // AWS_SDK_LOAD_CONFIG=1 - EnableSharedConfig bool - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. 
If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. - // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the session. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string - - csmEnabled string - CSMEnabled *bool - CSMPort string - CSMHost string - CSMClientID string - - // Enables endpoint discovery via environment variables. - // - // AWS_ENABLE_ENDPOINT_DISCOVERY=true - EnableEndpointDiscovery *bool - enableEndpointDiscovery string - - // Specifies the WebIdentity token the SDK should use to assume a role - // with. - // - // AWS_WEB_IDENTITY_TOKEN_FILE=file_path - WebIdentityTokenFilePath string - - // Specifies the IAM role arn to use when assuming an role. - // - // AWS_ROLE_ARN=role_arn - RoleARN string - - // Specifies the IAM role session name to use when assuming a role. - // - // AWS_ROLE_SESSION_NAME=session_name - RoleSessionName string - - // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint - // for a service. - // - // AWS_STS_REGIONAL_ENDPOINTS=regional - // This can take value as `regional` or `legacy` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the S3 Regional Endpoint flag for the SDK to resolve the - // endpoint for a service. - // - // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional - // This can take value as `regional` or `legacy` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. 
- // - // AWS_S3_USE_ARN_REGION=true - S3UseARNRegion bool -} - -var ( - csmEnabledEnvKey = []string{ - "AWS_CSM_ENABLED", - } - csmHostEnvKey = []string{ - "AWS_CSM_HOST", - } - csmPortEnvKey = []string{ - "AWS_CSM_PORT", - } - csmClientIDEnvKey = []string{ - "AWS_CSM_CLIENT_ID", - } - credAccessEnvKey = []string{ - "AWS_ACCESS_KEY_ID", - "AWS_ACCESS_KEY", - } - credSecretEnvKey = []string{ - "AWS_SECRET_ACCESS_KEY", - "AWS_SECRET_KEY", - } - credSessionEnvKey = []string{ - "AWS_SESSION_TOKEN", - } - - enableEndpointDiscoveryEnvKey = []string{ - "AWS_ENABLE_ENDPOINT_DISCOVERY", - } - - regionEnvKeys = []string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - profileEnvKeys = []string{ - "AWS_PROFILE", - "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - sharedCredsFileEnvKey = []string{ - "AWS_SHARED_CREDENTIALS_FILE", - } - sharedConfigFileEnvKey = []string{ - "AWS_CONFIG_FILE", - } - webIdentityTokenFilePathEnvKey = []string{ - "AWS_WEB_IDENTITY_TOKEN_FILE", - } - roleARNEnvKey = []string{ - "AWS_ROLE_ARN", - } - roleSessionNameEnvKey = []string{ - "AWS_ROLE_SESSION_NAME", - } - stsRegionalEndpointKey = []string{ - "AWS_STS_REGIONAL_ENDPOINTS", - } - s3UsEast1RegionalEndpoint = []string{ - "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", - } - s3UseARNRegionEnvKey = []string{ - "AWS_S3_USE_ARN_REGION", - } -) - -// loadEnvConfig retrieves the SDK's environment configuration. -// See `envConfig` for the values that will be retrieved. -// -// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value -// the shared SDK config will be loaded in addition to the SDK's specific -// configuration values. -func loadEnvConfig() (envConfig, error) { - enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) - return envConfigLoad(enableSharedConfig) -} - -// loadEnvSharedConfig retrieves the SDK's environment configuration, and the -// SDK shared config. See `envConfig` for the values that will be retrieved. -// -// Loads the shared configuration in addition to the SDK's specific configuration. -// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` -// environment variable is set. 
-func loadSharedEnvConfig() (envConfig, error) { - return envConfigLoad(true) -} - -func envConfigLoad(enableSharedConfig bool) (envConfig, error) { - cfg := envConfig{} - - cfg.EnableSharedConfig = enableSharedConfig - - // Static environment credentials - var creds credentials.Value - setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&creds.SessionToken, credSessionEnvKey) - if creds.HasKeys() { - // Require logical grouping of credentials - creds.ProviderName = EnvProviderName - cfg.Creds = creds - } - - // Role Metadata - setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) - setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) - - // Web identity environment variables - setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) - - // CSM environment variables - setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) - setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) - setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) - setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - - if len(cfg.csmEnabled) != 0 { - v, _ := strconv.ParseBool(cfg.csmEnabled) - cfg.CSMEnabled = &v - } - - regionKeys := regionEnvKeys - profileKeys := profileEnvKeys - if !cfg.EnableSharedConfig { - regionKeys = regionKeys[:1] - profileKeys = profileKeys[:1] - } - - setFromEnvVal(&cfg.Region, regionKeys) - setFromEnvVal(&cfg.Profile, profileKeys) - - // endpoint discovery is in reference to it being enabled. - setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) - if len(cfg.enableEndpointDiscovery) > 0 { - cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") - } - - setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) - setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) - - if len(cfg.SharedCredentialsFile) == 0 { - cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(cfg.SharedConfigFile) == 0 { - cfg.SharedConfigFile = defaults.SharedConfigFilename() - } - - cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - - var err error - // STS Regional Endpoint variable - for _, k := range stsRegionalEndpointKey { - if v := os.Getenv(k); len(v) != 0 { - cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - // S3 Regional Endpoint variable - for _, k := range s3UsEast1RegionalEndpoint { - if v := os.Getenv(k); len(v) != 0 { - cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - var s3UseARNRegion string - setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) - if len(s3UseARNRegion) != 0 { - switch { - case strings.EqualFold(s3UseARNRegion, "false"): - cfg.S3UseARNRegion = false - case strings.EqualFold(s3UseARNRegion, "true"): - cfg.S3UseARNRegion = true - default: - return envConfig{}, fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - s3UseARNRegionEnvKey[0], s3UseARNRegion) - } - } - - return cfg, nil -} - -func setFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) != 0 { - *dst = v - break - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 0ff49960..00000000 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,734 +0,0 @@ -package session - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/csm" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ErrCodeSharedConfig represents an error that occurs in the shared - // configuration logic - ErrCodeSharedConfig = "SharedConfigErr" -) - -// ErrSharedConfigSourceCollision will be returned if a section contains both -// source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) - -// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment -// variables are empty and Environment was set as the credential source -var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) - -// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided -var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the Session concurrently. -// -// The Session satisfies the service client's client.ConfigProvider. -type Session struct { - Config *aws.Config - Handlers request.Handlers -} - -// New creates a new instance of the handlers merging in the provided configs -// on top of the SDK's default configurations. Once the Session is created it -// can be mutated to modify the Config or Handlers. The Session is safe to be -// read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New -// method could now encounter an error when loading the configuration. When -// The environment variable is set, and an error occurs, New will return a -// session that will fail all requests reporting the error that occurred while -// loading the session. Use NewSession to get the error when creating the -// session. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. -// -// Deprecated: Use NewSession functions to create sessions instead. NewSession -// has the same functionality as New except an error can be returned when the -// func is called instead of waiting to receive an error until a request is made. -func New(cfgs ...*aws.Config) *Session { - // load initial config from environment - envCfg, envErr := loadEnvConfig() - - if envCfg.EnableSharedConfig { - var cfg aws.Config - cfg.MergeIn(cfgs...) 
- s, err := NewSessionWithOptions(Options{ - Config: cfg, - SharedConfigState: SharedConfigEnable, - }) - if err != nil { - // Old session.New expected all errors to be discovered when - // a request is made, and would report the errors then. This - // needs to be replicated if an error occurs while creating - // the session. - msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + - "Use session.NewSession to handle errors occurring during session creation." - - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s = &Session{Config: defaults.Config()} - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - - return s - } - - s := deprecatedNewSession(cfgs...) - if envErr != nil { - msg := "failed to load env config" - s.logDeprecatedNewSessionError(msg, envErr, cfgs) - } - - if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - msg := "failed to enable CSM" - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - } - - return s -} - -// NewSession returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. Once the Session is created -// it can be mutated to modify the Config or Handlers. The Session is safe to -// be read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created, such as specifying the -// config profile, and controlling if shared config is enabled or not. -func NewSession(cfgs ...*aws.Config) (*Session, error) { - opts := Options{} - opts.Config.MergeIn(cfgs...) - - return NewSessionWithOptions(opts) -} - -// SharedConfigState provides the ability to optionally override the state -// of the session's creation based on the shared config being enabled or -// disabled. -type SharedConfigState int - -const ( - // SharedConfigStateFromEnv does not override any state of the - // AWS_SDK_LOAD_CONFIG env var. It is the default value of the - // SharedConfigState type. - SharedConfigStateFromEnv SharedConfigState = iota - - // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value - // and disables the shared config functionality. - SharedConfigDisable - - // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value - // and enables the shared config functionality. - SharedConfigEnable -) - -// Options provides the means to control how a Session is created and what -// configuration values will be loaded. -// -type Options struct { - // Provides config values for the SDK to use when creating service clients - // and making API requests to services. Any value set in with this field - // will override the associated value provided by the SDK defaults, - // environment or config files where relevant. 
- // - // If not set, configuration values from from SDK defaults, environment, - // config will be used. - Config aws.Config - - // Overrides the config profile the Session should be created from. If not - // set the value of the environment variable will be loaded (AWS_PROFILE, - // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). - // - // If not set and environment variables are not set the "default" - // (DefaultSharedConfigProfile) will be used as the profile to load the - // session config from. - Profile string - - // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG - // environment variable. By default a Session will be created using the - // value provided by the AWS_SDK_LOAD_CONFIG environment variable. - // - // Setting this value to SharedConfigEnable or SharedConfigDisable - // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable - // and enable or disable the shared config functionality. - SharedConfigState SharedConfigState - - // Ordered list of files the session will load configuration from. - // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. - SharedConfigFiles []string - - // When the SDK's shared config is configured to assume a role with MFA - // this option is required in order to provide the mechanism that will - // retrieve the MFA token. There is no default value for this field. If - // it is not set an error will be returned when creating the session. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed. Within the context of service clients - // all sharing the same session the SDK will ensure calls to the token - // provider are atomic. When sharing a token provider across multiple - // sessions additional synchronization logic is needed to ensure the - // token providers do not introduce race conditions. It is recommend to - // share the session where possible. - // - // stscreds.StdinTokenProvider is a basic implementation that will prompt - // from stdin for the MFA token code. - // - // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. - AssumeRoleTokenProvider func() (string, error) - - // When the SDK's shared config is configured to assume a role this option - // may be provided to set the expiry duration of the STS credentials. - // Defaults to 15 minutes if not set as documented in the - // stscreds.AssumeRoleProvider. - AssumeRoleDuration time.Duration - - // Reader for a custom Credentials Authority (CA) bundle in PEM format that - // the SDK will use instead of the default system's root CA bundle. Use this - // only if you want to replace the CA bundle the SDK uses for TLS requests. - // - // Enabling this option will attempt to merge the Transport into the SDK's HTTP - // client. If the client's Transport is not a http.Transport an error will be - // returned. If the Transport's TLS config is set this option will cause the SDK - // to overwrite the Transport's TLS config's RootCAs value. If the CA - // bundle reader contains multiple certificates all of them will be loaded. - // - // The Session option CustomCABundle is also available when creating sessions - // to also enable this feature. CustomCABundle session option field has priority - // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. 
- CustomCABundle io.Reader - - // The handlers that the session and all API clients will be created with. - // This must be a complete set of handlers. Use the defaults.Handlers() - // function to initialize this value before changing the handlers to be - // used by the SDK. - Handlers request.Handlers -} - -// NewSessionWithOptions returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. This func uses the Options -// values to configure how the Session is created. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) -// -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) -// -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) -// -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) -func NewSessionWithOptions(opts Options) (*Session, error) { - var envCfg envConfig - var err error - if opts.SharedConfigState == SharedConfigEnable { - envCfg, err = loadSharedEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load shared config, %v", err) - } - } else { - envCfg, err = loadEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load environment config, %v", err) - } - } - - if len(opts.Profile) != 0 { - envCfg.Profile = opts.Profile - } - - switch opts.SharedConfigState { - case SharedConfigDisable: - envCfg.EnableSharedConfig = false - case SharedConfigEnable: - envCfg.EnableSharedConfig = true - } - - // Only use AWS_CA_BUNDLE if session option is not provided. - if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { - f, err := os.Open(envCfg.CustomCABundle) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to open custom CA bundle PEM file", err) - } - defer f.Close() - opts.CustomCABundle = f - } - - return newSession(opts, envCfg, &opts.Config) -} - -// Must is a helper function to ensure the Session is valid and there was no -// error when calling a NewSession function. -// -// This helper is intended to be used in variable initialization to load the -// Session and configuration at startup. Such as: -// -// var sess = session.Must(session.NewSession()) -func Must(sess *Session, err error) *Session { - if err != nil { - panic(err) - } - - return sess -} - -func deprecatedNewSession(cfgs ...*aws.Config) *Session { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Apply the passed in configs so the configuration can be applied to the - // default credential chain - cfg.MergeIn(cfgs...) 
- if cfg.EndpointResolver == nil { - // An endpoint resolver is required for a session to be able to provide - // endpoints for service client configurations. - cfg.EndpointResolver = endpoints.DefaultResolver() - } - cfg.Credentials = defaults.CredChain(cfg, handlers) - - // Reapply any passed in configs to override credentials if set - cfg.MergeIn(cfgs...) - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - return s -} - -func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { - if logger != nil { - logger.Log("Enabling CSM") - } - - r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) - if err != nil { - return err - } - r.InjectHandlers(handlers) - - return nil -} - -func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { - cfg := defaults.Config() - - handlers := opts.Handlers - if handlers.IsEmpty() { - handlers = defaults.Handlers() - } - - // Get a merged version of the user provided config to determine if - // credentials were. - userCfg := &aws.Config{} - userCfg.MergeIn(cfgs...) - cfg.MergeIn(userCfg) - - // Ordered config files will be loaded in with later files overwriting - // previous config file values. - var cfgFiles []string - if opts.SharedConfigFiles != nil { - cfgFiles = opts.SharedConfigFiles - } else { - cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). - cfgFiles = cfgFiles[1:] - } - } - - // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) - if err != nil { - if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { - // Special case where the user has not explicitly specified an AWS_PROFILE, - // or session.Options.profile, shared config is not enabled, and the - // environment has credentials, allow the shared config file to fail to - // load since the user has already provided credentials, and nothing else - // is required to be read file. 
Github(aws/aws-sdk-go#2455) - } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return nil, err - } - } - - if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { - return nil, err - } - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - - if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - return nil, err - } - } - - // Setup HTTP client with custom cert bundle if enabled - if opts.CustomCABundle != nil { - if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { - return nil, err - } - } - - return s, nil -} - -type csmConfig struct { - Enabled bool - Host string - Port string - ClientID string -} - -var csmProfileName = "aws_csm" - -func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { - if envCfg.CSMEnabled != nil { - if *envCfg.CSMEnabled { - return csmConfig{ - Enabled: true, - ClientID: envCfg.CSMClientID, - Host: envCfg.CSMHost, - Port: envCfg.CSMPort, - }, nil - } - return csmConfig{}, nil - } - - sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return csmConfig{}, err - } - } - if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { - return csmConfig{ - Enabled: true, - ClientID: sharedCfg.CSMClientID, - Host: sharedCfg.CSMHost, - Port: sharedCfg.CSMPort, - }, nil - } - - return csmConfig{}, nil -} - -func loadCustomCABundle(s *Session, bundle io.Reader) error { - var t *http.Transport - switch v := s.Config.HTTPClient.Transport.(type) { - case *http.Transport: - t = v - default: - if s.Config.HTTPClient.Transport != nil { - return awserr.New("LoadCustomCABundleError", - "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) - } - } - if t == nil { - // Nil transport implies `http.DefaultTransport` should be used. Since - // the SDK cannot modify, nor copy the `DefaultTransport` specifying - // the values the next closest behavior. 
- t = getCABundleTransport() - } - - p, err := loadCertPool(bundle) - if err != nil { - return err - } - if t.TLSClientConfig == nil { - t.TLSClientConfig = &tls.Config{} - } - t.TLSClientConfig.RootCAs = p - - s.Config.HTTPClient.Transport = t - - return nil -} - -func loadCertPool(r io.Reader) (*x509.CertPool, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New("LoadCustomCABundleError", - "failed to read custom CA bundle PEM file", err) - } - - p := x509.NewCertPool() - if !p.AppendCertsFromPEM(b) { - return nil, awserr.New("LoadCustomCABundleError", - "failed to load custom CA bundle PEM file", err) - } - - return p, nil -} - -func mergeConfigSrcs(cfg, userCfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) error { - - // Region if not already set by user - if len(aws.StringValue(cfg.Region)) == 0 { - if len(envCfg.Region) > 0 { - cfg.WithRegion(envCfg.Region) - } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { - cfg.WithRegion(sharedCfg.Region) - } - } - - if cfg.EnableEndpointDiscovery == nil { - if envCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) - } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) - } - } - - // Regional Endpoint flag for STS endpoint resolving - mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ - userCfg.STSRegionalEndpoint, - envCfg.STSRegionalEndpoint, - sharedCfg.STSRegionalEndpoint, - endpoints.LegacySTSEndpoint, - }) - - // Regional Endpoint flag for S3 endpoint resolving - mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ - userCfg.S3UsEast1RegionalEndpoint, - envCfg.S3UsEast1RegionalEndpoint, - sharedCfg.S3UsEast1RegionalEndpoint, - endpoints.LegacyS3UsEast1Endpoint, - }) - - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds - } - - cfg.S3UseARNRegion = userCfg.S3UseARNRegion - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &envCfg.S3UseARNRegion - } - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion - } - - return nil -} - -func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetSTSEndpoint { - cfg.STSRegionalEndpoint = v - break - } - } -} - -func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetS3UsEast1Endpoint { - cfg.S3UsEast1RegionalEndpoint = v - break - } - } -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current Session, copying the config -// and handlers. If any additional configs are provided they will be merged -// on top of the Session's copied config. -// -// // Create a copy of the current Session, configured for the us-west-2 region. 
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) - - region := aws.StringValue(s.Config.Region) - resolved, err := s.resolveEndpoint(service, region, s.Config) - if err != nil { - s.Handlers.Validate.PushBack(func(r *request.Request) { - if len(r.ClientInfo.Endpoint) != 0 { - // Error occurred while resolving endpoint, but the request - // being invoked has had an endpoint specified after the client - // was created. - return - } - r.Error = err - }) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - PartitionID: resolved.PartitionID, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - } -} - -func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { - - if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { - return endpoints.ResolvedEndpoint{ - URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), - SigningRegion: region, - }, nil - } - - resolved, err := cfg.EndpointResolver.EndpointFor(service, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) - opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) - // Support for STSRegionalEndpoint where the STSRegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint - - // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) - if err != nil { - return endpoints.ResolvedEndpoint{}, err - } - - return resolved, nil -} - -// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception -// that the EndpointResolver will not be used to resolve the endpoint. The only -// endpoint set must come from the aws.Config.Endpoint field. -func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) 
- - var resolved endpoints.ResolvedEndpoint - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { - resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = aws.StringValue(s.Config.Region) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - } -} - -// logDeprecatedNewSessionError function enables error handling for session -func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go deleted file mode 100644 index a8ed8807..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ /dev/null @@ -1,547 +0,0 @@ -package session - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/internal/ini" -) - -const ( - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required (or credential_source) - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - - // CSM options - csmEnabledKey = `csm_enabled` - csmHostKey = `csm_host` - csmPortKey = `csm_port` - csmClientIDKey = `csm_client_id` - - // Additional Config fields - regionKey = `region` - - // endpoint discovery group - enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - - // External Credential Process - credentialProcessKey = `credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // Additional config fields for regional or legacy endpoints - stsRegionalEndpointSharedKey = `sts_regional_endpoints` - - // Additional config fields for regional or legacy endpoints - s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. - DefaultSharedConfigProfile = `default` - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" -) - -// sharedConfig represents the configuration fields of the SDK config files. -type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id and - // aws_secret_access_key must be provided together in the same file to be - // considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of - // the other two fields are also provided. 
- // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Creds credentials.Value - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string - - SourceProfileName string - SourceProfile *sharedConfig - - // Region is the region the SDK should use for looking up AWS service - // endpoints and signing requests. - // - // region - Region string - - // EnableEndpointDiscovery can be enabled in the shared config by setting - // endpoint_discovery_enabled to true - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery *bool - - // CSM Options - CSMEnabled *bool - CSMHost string - CSMPort string - CSMClientID string - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // sts_regional_endpoints = regional - // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // s3_us_east_1_regional_endpoint = regional - // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // s3_use_arn_region=true - S3UseARNRegion bool -} - -type sharedConfigFile struct { - Filename string - IniData ini.Sections -} - -// loadSharedConfig retrieves the configuration from the list of files using -// the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of -// A's. -// -// See sharedConfig.setFromFile for information how the config files -// will be loaded. -func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { - if len(profile) == 0 { - profile = DefaultSharedConfigProfile - } - - files, err := loadSharedConfigIniFiles(filenames) - if err != nil { - return sharedConfig{}, err - } - - cfg := sharedConfig{} - profiles := map[string]struct{}{} - if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { - return sharedConfig{}, err - } - - return cfg, nil -} - -func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { - files := make([]sharedConfigFile, 0, len(filenames)) - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { - // Skip files which can't be opened and read for whatever reason - continue - } else if err != nil { - return nil, SharedConfigLoadError{Filename: filename, Err: err} - } - - files = append(files, sharedConfigFile{ - Filename: filename, IniData: sections, - }) - } - - return files, nil -} - -func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { - // Trim files from the list that don't exist. 
- var skippedFiles int - var profileNotFoundErr error - for _, f := range files { - if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore profiles not defined in individual files. - profileNotFoundErr = err - skippedFiles++ - continue - } - return err - } - } - if skippedFiles == len(files) { - // If all files were skipped because the profile is not found, return - // the original profile not found error. - return profileNotFoundErr - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. - cfg.clearAssumeRoleOptions() - } else { - // First time a profile has been seen, It must either be a assume role - // or credentials. Assert if the credential type requires a role ARN, - // the ARN is also set. - if err := cfg.validateCredentialsRequireARN(profile); err != nil { - return err - } - } - profiles[profile] = struct{}{} - - if err := cfg.validateCredentialType(); err != nil { - return err - } - - // Link source profiles for assume roles - if len(cfg.SourceProfileName) != 0 { - // Linked profile via source_profile ignore credential provider - // options, the source profile must provide the credentials. - cfg.clearCredentialOptions() - - srcCfg := &sharedConfig{} - err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) - if err != nil { - // SourceProfile that doesn't exist is an error in configuration. - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - err = SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - return err - } - - if !srcCfg.hasCredentials() { - return SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - - cfg.SourceProfile = srcCfg - } - - return nil -} - -// setFromFile loads the configuration from the file using the profile -// provided. A sharedConfig pointer type value is used so that multiple config -// file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For -// example if a config file only includes aws_access_key_id but no -// aws_secret_access_key the aws_access_key_id will be ignored. 
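// Illustrative sketch (not part of the vendored file): how the shared config
// loading above is typically driven from application code. The profile name
// and file contents below are hypothetical; the keys match the ones parsed by
// this file (region, role_arn, source_profile, aws_access_key_id, ...).
//
// ~/.aws/config (hypothetical):
//
//	[profile ci]
//	region         = us-east-1
//	role_arn       = arn:aws:iam::123456789012:role/ci
//	source_profile = default
//
// ~/.aws/credentials (hypothetical):
//
//	[default]
//	aws_access_key_id     = AKID
//	aws_secret_access_key = SECRET
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// SharedConfigEnable makes the session read ~/.aws/config in addition to
	// ~/.aws/credentials, so the "ci" profile's assume-role settings are
	// resolved through its source_profile as described above.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		Profile:           "ci",
		SharedConfigState: session.SharedConfigEnable,
	}))

	fmt.Println("region:", aws.StringValue(sess.Config.Region))
}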
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { - section, ok := file.IniData.GetSection(profile) - if !ok { - // Fallback to to alternate profile name: profile - section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if !ok { - return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} - } - } - - if exOpts { - // Assume Role Parameters - updateString(&cfg.RoleARN, section, roleArnKey) - updateString(&cfg.ExternalID, section, externalIDKey) - updateString(&cfg.MFASerial, section, mfaSerialKey) - updateString(&cfg.RoleSessionName, section, roleSessionNameKey) - updateString(&cfg.SourceProfileName, section, sourceProfileKey) - updateString(&cfg.CredentialSource, section, credentialSourceKey) - updateString(&cfg.Region, section, regionKey) - - if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { - sre, err := endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - stsRegionalEndpointSharedKey, file.Filename, err) - } - cfg.STSRegionalEndpoint = sre - } - - if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { - sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - s3UsEast1RegionalSharedKey, file.Filename, err) - } - cfg.S3UsEast1RegionalEndpoint = sre - } - } - - updateString(&cfg.CredentialProcess, section, credentialProcessKey) - updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - // Shared Credentials - creds := credentials.Value{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } - if creds.HasKeys() { - cfg.Creds = creds - } - - // Endpoint discovery - updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - - // CSM options - updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) - updateString(&cfg.CSMHost, section, csmHostKey) - updateString(&cfg.CSMPort, section, csmPortKey) - updateString(&cfg.CSMClientID, section, csmClientIDKey) - - updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) - - return nil -} - -func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(cfg.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(cfg.CredentialSource) != 0: - credSource = credentialSourceKey - case len(cfg.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(cfg.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (cfg *sharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. 
- if !oneOrNone( - len(cfg.SourceProfileName) != 0, - len(cfg.CredentialSource) != 0, - len(cfg.CredentialProcess) != 0, - len(cfg.WebIdentityTokenFile) != 0, - ) { - return ErrSharedConfigSourceCollision - } - - return nil -} - -func (cfg *sharedConfig) hasCredentials() bool { - switch { - case len(cfg.SourceProfileName) != 0: - case len(cfg.CredentialSource) != 0: - case len(cfg.CredentialProcess) != 0: - case len(cfg.WebIdentityTokenFile) != 0: - case cfg.Creds.HasKeys(): - default: - return false - } - - return true -} - -func (cfg *sharedConfig) clearCredentialOptions() { - cfg.CredentialSource = "" - cfg.CredentialProcess = "" - cfg.WebIdentityTokenFile = "" - cfg.Creds = credentials.Value{} -} - -func (cfg *sharedConfig) clearAssumeRoleOptions() { - cfg.RoleARN = "" - cfg.ExternalID = "" - cfg.MFASerial = "" - cfg.RoleSessionName = "" - cfg.SourceProfileName = "" -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.Bool(key) -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = new(bool) - **dst = section.Bool(key) -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigLoadError) Code() string { - return "SharedConfigLoadError" -} - -// Message is the description of the error -func (e SharedConfigLoadError) Message() string { - return fmt.Sprintf("failed to load config file, %s", e.Filename) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigLoadError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. -func (e SharedConfigLoadError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigProfileNotExistsError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistsError struct { - Profile string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigProfileNotExistsError) Code() string { - return "SharedConfigProfileNotExistsError" -} - -// Message is the description of the error -func (e SharedConfigProfileNotExistsError) Message() string { - return fmt.Sprintf("failed to get profile, %s", e.Profile) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigProfileNotExistsError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. 
-func (e SharedConfigProfileNotExistsError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - RoleARN string - SourceProfile string -} - -// Code is the short id of the error. -func (e SharedConfigAssumeRoleError) Code() string { - return "SharedConfigAssumeRoleError" -} - -// Message is the description of the error -func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf( - "failed to load assume role for %s, source profile %s has no shared credentials", - e.RoleARN, e.SourceProfile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e SharedConfigAssumeRoleError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Code is the short id of the error. -func (e CredentialRequiresARNError) Code() string { - return "CredentialRequiresARNError" -} - -// Message is the description of the error -func (e CredentialRequiresARNError) Message() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e CredentialRequiresARNError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. 
-func (e CredentialRequiresARNError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go deleted file mode 100644 index 07ea799f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ /dev/null @@ -1,81 +0,0 @@ -package v4 - -import ( - "github.com/aws/aws-sdk-go/internal/strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// whitelist is a generic rule for whitelisting -type whitelist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// blacklist is a generic rule for blacklisting -type blacklist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go deleted file mode 100644 index 6aa2ed24..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go +++ /dev/null @@ -1,7 +0,0 @@ -package v4 - -// WithUnsignedPayload will enable and set the UnsignedPayload field to -// true of the signer. 
-func WithUnsignedPayload(v4 *Signer) { - v4.UnsignedPayload = true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go deleted file mode 100644 index 02cbd97e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go +++ /dev/null @@ -1,63 +0,0 @@ -package v4 - -import ( - "encoding/hex" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -type credentialValueProvider interface { - Get() (credentials.Value, error) -} - -// StreamSigner implements signing of event stream encoded payloads -type StreamSigner struct { - region string - service string - - credentials credentialValueProvider - - prevSig []byte -} - -// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages -func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { - return &StreamSigner{ - region: region, - service: service, - credentials: credentials, - prevSig: seedSignature, - } -} - -// GetSignature takes an event stream encoded headers and payload and returns a signature -func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { - credValue, err := s.credentials.Get() - if err != nil { - return nil, err - } - - sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) - - keyPath := buildSigningScope(s.region, s.service, date) - - stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) - - signature := hmacSHA256(sigKey, []byte(stringToSign)) - s.prevSig = signature - - return signature, nil -} - -func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - formatTime(date), - scope, - hex.EncodeToString(prevSig), - hex.EncodeToString(hashSHA256(headers)), - hex.EncodeToString(hashSHA256(payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go deleted file mode 100644 index bd082e9d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build go1.5 - -package v4 - -import ( - "net/url" - "strings" -) - -func getURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = u.EscapedPath() - } - - if len(uri) == 0 { - uri = "/" - } - - return uri -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go deleted file mode 100644 index b97334c7..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ /dev/null @@ -1,846 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. -// -// Standalone Signer -// -// Generally using the signer outside of the SDK should not require any additional -// logic when using Go v1.5 or higher. The signer does this by taking advantage -// of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. -// -// The signer will first check the URL.Opaque field, and use its value if set. 
-// The signer does require the URL.Opaque field to be set in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. If you're using Go v1.4 you must set -// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with -// Go v1.5 the signer will fallback to URL.Path. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath -// -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. -// -// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 -// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the -// request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP -// message. URL.Opaque generally will force Go to make requests with absolute URL. -// URL.RawPath does not do this, but RawPath must be a valid escaping of Path -// or url.EscapedPath will ignore the RawPath escaping. -// -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authorizationHeader = "Authorization" - authHeaderSignatureElem = "Signature=" - signatureQueryKey = "X-Amz-Signature" - - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - awsV4Request = "aws4_request" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - authorizationHeader: struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. 
-var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Tagging": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials *credentials.Credentials - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. See - // aws.LogLevelType for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevelType - - // The logger loging information will be written to. If there the logger - // is nil, nothing will be logged. - Logger aws.Logger - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // Disables the automatical setting of the HTTP request's Body field with the - // io.ReadSeeker passed in to the signer. 
This is useful if you're using a - // custom wrapper around the body for the io.ReadSeeker and want to preserve - // the Body value on the Request.Body. - // - // This does run the risk of signing a request with a body that will not be - // sent in the request. Need to ensure that the underlying data of the Body - // values are the same. - DisableRequestBodyOverwrite bool - - // currentTimeFn returns the time value which represents the current time. - // This value should only be used for testing. If it is nil the default - // time.Now will be used. - currentTimeFn func() time.Time - - // UnsignedPayload will prevent signing of the payload. This will only - // work for services that have support for this. - UnsignedPayload bool -} - -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { - v4 := &Signer{ - Credentials: credentials, - } - - for _, option := range options { - option(v4) - } - - return v4 -} - -type signingCtx struct { - ServiceName string - Region string - Request *http.Request - Body io.ReadSeeker - Query url.Values - Time time.Time - ExpireTime time.Duration - SignedHeaderVals http.Header - - DisableURIPathEscaping bool - - credValues credentials.Value - isPresign bool - unsignedPayload bool - - bodyDigest string - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign signs AWS v4 requests with the provided body, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. Generally for signed requests this value -// is not needed as the full request context will be captured by the http.Request -// value. It is included for reference though. -// -// Sign will set the request's Body to be the `body` parameter passed in. If -// the body is not already an io.ReadCloser, it will be wrapped within one. If -// a `nil` body parameter passed to Sign, the request's Body field will be -// also set to nil. Its important to note that this functionality will not -// change the request's ContentLength of the request. -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, false, signTime) -} - -// Presign signs AWS v4 requests with the provided body, service name, region -// the request is made to, and time the request is signed at. 
The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. For presigned requests these headers -// and their values must be included on the HTTP request when it is made. This -// is helpful to know what header values need to be shared with the party the -// presigned request will be distributed to. -// -// Presign differs from Sign in that it will sign the request using query string -// instead of header values. This allows you to share the Presigned Request's -// URL with third parties, or distribute it throughout your system with minimal -// dependencies. -// -// Presign also takes an exp value which is the duration the -// signed request will be valid after the signing time. This is allows you to -// set when the request will expire. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -// -// Presigning a S3 request will not compute the body's SHA256 hash by default. -// This is done due to the general use case for S3 presigned URLs is to share -// PUT/GET capabilities. If you would like to include the body's SHA256 in the -// presigned request's signature you can set the "X-Amz-Content-Sha256" -// HTTP header and that will be included in the request's signature. -func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, true, signTime) -} - -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { - currentTimeFn := v4.currentTimeFn - if currentTimeFn == nil { - currentTimeFn = time.Now - } - - ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: isPresign, - ServiceName: service, - Region: region, - DisableURIPathEscaping: v4.DisableURIPathEscaping, - unsignedPayload: v4.UnsignedPayload, - } - - for key := range ctx.Query { - sort.Strings(ctx.Query[key]) - } - - if ctx.isRequestSigned() { - ctx.Time = currentTimeFn() - ctx.handlePresignRemoval() - } - - var err error - ctx.credValues, err = v4.Credentials.Get() - if err != nil { - return http.Header{}, err - } - - ctx.sanitizeHostForHeader() - ctx.assignAmzQueryValues() - if err := ctx.build(v4.DisableHeaderHoisting); err != nil { - return nil, err - } - - // If the request is not presigned the body should be attached to it. This - // prevents the confusion of wanting to send a signed request without - // the body the request was signed for attached. 
- if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { - var reader io.ReadCloser - if body != nil { - var ok bool - if reader, ok = body.(io.ReadCloser); !ok { - reader = ioutil.NopCloser(body) - } - } - r.Body = reader - } - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo(ctx) - } - - return ctx.SignedHeaderVals, nil -} - -func (ctx *signingCtx) sanitizeHostForHeader() { - request.SanitizeHostForHeader(ctx.Request) -} - -func (ctx *signingCtx) handlePresignRemoval() { - if !ctx.isPresign { - return - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - ctx.removePresign() - - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - ctx.Request.URL.RawQuery = ctx.Query.Encode() -} - -func (ctx *signingCtx) assignAmzQueryValues() { - if ctx.isPresign { - ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if ctx.credValues.SessionToken != "" { - ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } else { - ctx.Query.Del("X-Amz-Security-Token") - } - - return - } - - if ctx.credValues.SessionToken != "" { - ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } -} - -// SignRequestHandler is a named request handler the SDK will use to sign -// service client request with using the V4 signature. -var SignRequestHandler = request.NamedHandler{ - Name: "v4.SignRequestHandler", Fn: SignSDKRequest, -} - -// SignSDKRequest signs an AWS request with the V4 signature. This -// request handler should only be used with the SDK's built in service client's -// API operation requests. -// -// This function should not be used on its on its own, but in conjunction with -// an AWS service client's API operation call. To sign a standalone request -// not created by a service client's API operation method use the "Sign" or -// "Presign" functions of the "Signer" type. -// -// If the credentials of the request's config are set to -// credentials.AnonymousCredentials the request will not be signed. -func SignSDKRequest(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now) -} - -// BuildNamedHandler will build a generic handler for signing. -func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { - return request.NamedHandler{ - Name: name, - Fn: func(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now, opts...) - }, - } -} - -// SignSDKRequestWithCurrentTime will sign the SDK's request using the time -// function passed in. Behaves the same as SignSDKRequest with the exception -// the request is signed with the value returned by the current time function. -func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. 
- if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel.Value() - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - v4.currentTimeFn = curTimeFn - if name == "s3" { - // S3 service should not have any escaping applied - v4.DisableURIPathEscaping = true - } - // Prevents setting the HTTPRequest's Body. Since the Body could be - // wrapped in a custom io.Closer that we do not want to be stompped - // on top of by the signer. - v4.DisableRequestBodyOverwrite = true - }) - - for _, opt := range opts { - opt(v4) - } - - curTime := curTimeFn() - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, curTime, - ) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTime -} - -const logSignInfoMsg = `DEBUG: Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) error { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - if err := ctx.buildBodyDigest(); err != nil { - return err - } - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - authHeaderSignatureElem + ctx.signature, - } - ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) - } - - return nil -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. 
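// Illustrative sketch (not part of the vendored file): using the Signer above
// to sign a standalone *http.Request, then reading the signature back with
// GetSignedRequestSignature. The endpoint, region, and static credentials are
// hypothetical placeholders.
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Static credentials keep the sketch self-contained; real callers would
	// normally source credentials from a session or the default chain.
	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

	body := strings.NewReader("Action=GetCallerIdentity&Version=2011-06-15")
	req, err := http.NewRequest("POST", "https://sts.us-east-1.amazonaws.com/", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	// Sign hashes the seekable body and sets the Authorization and X-Amz-Date
	// headers on the request in place.
	if _, err := signer.Sign(req, body, "sts", "us-east-1", time.Now()); err != nil {
		panic(err)
	}

	// GetSignedRequestSignature extracts the hex-decoded signature from the
	// Authorization header (or X-Amz-Signature for presigned URLs).
	sig, err := v4.GetSignedRequestSignature(req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature: %x\n", sig)
}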
-func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") - for _, p := range ps { - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func (ctx *signingCtx) buildTime() { - if ctx.isPresign { - duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - if !r.IsValid(k) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
- continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host - } else { - headerValues[i] = "host:" + ctx.Request.URL.Host - } - } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") - } - } - stripExcessSpaces(headerValues) - ctx.canonicalHeaders = strings.Join(headerValues, "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - - uri := getURIPath(ctx.Request.URL) - - if !ctx.DisableURIPathEscaping { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - formatTime(ctx.Time), - ctx.credentialString, - hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) - signature := hmacSHA256(creds, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() error { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - includeSHA256Header := ctx.unsignedPayload || - ctx.ServiceName == "s3" || - ctx.ServiceName == "glacier" - - s3Presign := ctx.isPresign && ctx.ServiceName == "s3" - - if ctx.unsignedPayload || s3Presign { - hash = "UNSIGNED-PAYLOAD" - includeSHA256Header = !s3Presign - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - if !aws.IsReaderSeekable(ctx.Body) { - return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) - } - hashBytes, err := makeSha256Reader(ctx.Body) - if err != nil { - return err - } - hash = hex.EncodeToString(hashBytes) - } - - if includeSHA256Header { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash - - return nil -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
-func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func hmacSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func hashSHA256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { - hash := sha256.New() - start, err := reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - return nil, err - } - defer func() { - // ensure error is return if unable to seek back to start of payload. - _, err = reader.Seek(start, sdkio.SeekStart) - }() - - // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies - // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. - size, err := aws.SeekerLen(reader) - if err != nil { - io.Copy(hash, reader) - } else { - io.CopyN(hash, reader, size) - } - - return hash.Sum(nil), nil -} - -const doubleSpace = " " - -// stripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func stripExcessSpaces(vals []string) { - var j, k, l, m, spaces int - for i, str := range vals { - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - vals[i] = str - continue - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - vals[i] = string(buf[:m]) - } -} - -func buildSigningScope(region, service string, dt time.Time) string { - return strings.Join([]string{ - formatShortTime(dt), - region, - service, - awsV4Request, - }, "/") -} - -func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { - kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) - kRegion := hmacSHA256(kDate, []byte(region)) - kService := hmacSHA256(kRegion, []byte(service)) - signingKey := hmacSHA256(kService, []byte(awsV4Request)) - return signingKey -} - -func formatShortTime(dt time.Time) string { - return dt.UTC().Format(shortTimeFormat) -} - -func formatTime(dt time.Time) string { - return dt.UTC().Format(timeFormat) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index d542ef01..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,241 +0,0 @@ -package aws - -import ( - "io" - "strings" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. -// -// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API -// operation's input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if the operation -// requires payload signing. 
-// -// Note: If using With S3 PutObject to stream an object upload The SDK's S3 -// Upload manager (s3manager.Uploader) provides support for streaming with the -// ability to retry network errors. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// IsReaderSeekable returns if the underlying reader type can be seeked. A -// io.Reader might not actually be seekable if it is the ReaderSeekerCloser -// type. -func IsReaderSeekable(r io.Reader) bool { - switch v := r.(type) { - case ReaderSeekerCloser: - return v.IsSeeker() - case *ReaderSeekerCloser: - return v.IsSeeker() - case io.ReadSeeker: - return true - default: - return false - } -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// GetLen returns the length of the bytes remaining in the underlying reader. -// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return seekerLen(s) - } - - return -1, nil -} - -// SeekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func SeekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. 
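`SeekerLen` (whose body continues just below) measures the bytes remaining in a payload by seeking to the end and then restoring the original offset. A minimal stdlib-only sketch of that seek, measure, and restore pattern, using a `bytes.Reader` as a stand-in payload:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// remaining reports how many bytes are left after the seeker's current
// offset, restoring the original position before returning.
func remaining(s io.Seeker) (int64, error) {
	cur, err := s.Seek(0, io.SeekCurrent)
	if err != nil {
		return 0, err
	}
	end, err := s.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, err
	}
	if _, err := s.Seek(cur, io.SeekStart); err != nil {
		return 0, err
	}
	return end - cur, nil
}

func main() {
	r := bytes.NewReader([]byte("hello world"))
	r.Seek(6, io.SeekStart) // pretend "hello " has already been consumed

	n, err := remaining(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 5
}
```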
- switch v := s.(type) { - case ReaderSeekerCloser: - return v.GetLen() - case *ReaderSeekerCloser: - return v.GetLen() - } - - return seekerLen(s) -} - -func seekerLen(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, sdkio.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, sdkio.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} - -// MultiCloser is a utility to close multiple io.Closers within a single -// statement. -type MultiCloser []io.Closer - -// Close closes all of the io.Closers making up the MultiClosers. Any -// errors that occur while closing will be returned in the order they -// occur. -func (m MultiCloser) Close() error { - var errs errors - for _, c := range m { - err := c.Close() - if err != nil { - errs = append(errs, err) - } - } - if len(errs) != 0 { - return errs - } - - return nil -} - -type errors []error - -func (es errors) Error() string { - var parts []string - for _, e := range es { - parts = append(parts, e.Error()) - } - - return strings.Join(parts, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go deleted file mode 100644 index 6192b245..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. 
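`WriteAtBuffer`, deleted just above, is an in-memory `io.WriterAt` intended for use with the S3 download manager. A small usage sketch, assuming the project still pulls `aws-sdk-go` from the module cache now that the vendored copy is gone; the data and offsets are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Start with an empty backing slice; WriteAt grows it as needed.
	buf := aws.NewWriteAtBuffer([]byte{})

	// Writes may arrive out of order; overlapping writes overwrite earlier bytes.
	buf.WriteAt([]byte("world"), 6)
	buf.WriteAt([]byte("hello"), 0)
	buf.WriteAt([]byte(" "), 5)

	fmt.Printf("%q\n", string(buf.Bytes())) // "hello world"
}
```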
-func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go deleted file mode 100644 index 0210d272..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. -// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index 34d4d1ee..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.28.11" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go deleted file mode 100644 index e83a9988..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go +++ /dev/null @@ -1,120 +0,0 @@ -package ini - -// ASTKind represents different states in the parse table -// and the type of AST that is being constructed -type ASTKind int - -// ASTKind* is used in the parse table to transition between -// the different states -const ( - ASTKindNone = ASTKind(iota) - ASTKindStart - ASTKindExpr - ASTKindEqualExpr - ASTKindStatement - ASTKindSkipStatement - ASTKindExprStatement - ASTKindSectionStatement - ASTKindNestedSectionStatement - ASTKindCompletedNestedSectionStatement - ASTKindCommentStatement - ASTKindCompletedSectionStatement -) - -func (k ASTKind) String() string { - switch k { - case ASTKindNone: - return "none" - case ASTKindStart: - return "start" - case ASTKindExpr: - return "expr" - case ASTKindStatement: - return "stmt" - case ASTKindSectionStatement: - return "section_stmt" - case ASTKindExprStatement: - return "expr_stmt" - case ASTKindCommentStatement: - return "comment" - case ASTKindNestedSectionStatement: - return "nested_section_stmt" - case ASTKindCompletedSectionStatement: - return "completed_stmt" - case ASTKindSkipStatement: - return "skip" - default: - return "" - } -} - -// AST interface allows us to determine what kind of node we -// are on and casting may not need to be necessary. -// -// The root is always the first node in Children -type AST struct { - Kind ASTKind - Root Token - RootToken bool - Children []AST -} - -func newAST(kind ASTKind, root AST, children ...AST) AST { - return AST{ - Kind: kind, - Children: append([]AST{root}, children...), - } -} - -func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { - return AST{ - Kind: kind, - Root: root, - RootToken: true, - Children: children, - } -} - -// AppendChild will append to the list of children an AST has. 
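The `URLHostname` wrappers above (and the `stripPort` copy for pre-1.8 toolchains) exist only for Go versions that lack `url.URL.Hostname`. On Go 1.8 and later the standard library call is sufficient:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://example.com:8443/path")
	if err != nil {
		panic(err)
	}
	// Hostname strips the port (and brackets around IPv6 literals).
	fmt.Println(u.Hostname()) // example.com
	fmt.Println(u.Port())     // 8443
}
```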
-func (a *AST) AppendChild(child AST) { - a.Children = append(a.Children, child) -} - -// GetRoot will return the root AST which can be the first entry -// in the children list or a token. -func (a *AST) GetRoot() AST { - if a.RootToken { - return *a - } - - if len(a.Children) == 0 { - return AST{} - } - - return a.Children[0] -} - -// GetChildren will return the current AST's list of children -func (a *AST) GetChildren() []AST { - if len(a.Children) == 0 { - return []AST{} - } - - if a.RootToken { - return a.Children - } - - return a.Children[1:] -} - -// SetChildren will set and override all children of the AST. -func (a *AST) SetChildren(children []AST) { - if a.RootToken { - a.Children = children - } else { - a.Children = append(a.Children[:1], children...) - } -} - -// Start is used to indicate the starting state of the parse table. -var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go deleted file mode 100644 index 0895d53c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go +++ /dev/null @@ -1,11 +0,0 @@ -package ini - -var commaRunes = []rune(",") - -func isComma(b rune) bool { - return b == ',' -} - -func newCommaToken() Token { - return newToken(TokenComma, commaRunes, NoneType) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go deleted file mode 100644 index 0b76999b..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// isComment will return whether or not the next byte(s) is a -// comment. -func isComment(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case ';': - return true - case '#': - return true - } - - return false -} - -// newCommentToken will create a comment token and -// return how many bytes were read. -func newCommentToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if b[i] == '\n' { - break - } - - if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { - break - } - } - - return newToken(TokenComment, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go deleted file mode 100644 index 25ce0fe1..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package ini is an LL(1) parser for configuration files. 
-// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -// -// Below is the BNF that describes this parser -// Grammar: -// stmt -> value stmt' -// stmt' -> epsilon | op stmt -// value -> number | string | boolean | quoted_string -// -// section -> [ section' -// section' -> value section_close -// section_close -> ] -// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value -package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go deleted file mode 100644 index 04345a54..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go +++ /dev/null @@ -1,4 +0,0 @@ -package ini - -// emptyToken is used to satisfy the Token interface -var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go deleted file mode 100644 index 91ba2a59..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -// newExpression will return an expression AST. -// Expr represents an expression -// -// grammar: -// expr -> string | number -func newExpression(tok Token) AST { - return newASTWithRootToken(ASTKindExpr, tok) -} - -func newEqualExpr(left AST, tok Token) AST { - return newASTWithRootToken(ASTKindEqualExpr, tok, left) -} - -// EqualExprKey will return a LHS value in the equal expr -func EqualExprKey(ast AST) string { - children := ast.GetChildren() - if len(children) == 0 || ast.Kind != ASTKindEqualExpr { - return "" - } - - return string(children[0].Root.Raw()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go deleted file mode 100644 index 8d462f77..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build gofuzz - -package ini - -import ( - "bytes" -) - -func Fuzz(data []byte) int { - b := bytes.NewReader(data) - - if _, err := Parse(b); err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go deleted file mode 100644 index 3b0ca7af..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go +++ /dev/null @@ -1,51 +0,0 @@ -package ini - -import ( - "io" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// OpenFile takes a path to a given file, and will open and parse -// that file. -func OpenFile(path string) (Sections, error) { - f, err := os.Open(path) - if err != nil { - return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) - } - defer f.Close() - - return Parse(f) -} - -// Parse will parse the given file using the shared config -// visitor. -func Parse(f io.Reader) (Sections, error) { - tree, err := ParseAST(f) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} - -// ParseBytes will parse the given bytes and return the parsed sections. 
-func ParseBytes(b []byte) (Sections, error) { - tree, err := ParseASTBytes(b) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go deleted file mode 100644 index 582c024a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go +++ /dev/null @@ -1,165 +0,0 @@ -package ini - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // ErrCodeUnableToReadFile is used when a file is failed to be - // opened or read from. - ErrCodeUnableToReadFile = "FailedRead" -) - -// TokenType represents the various different tokens types -type TokenType int - -func (t TokenType) String() string { - switch t { - case TokenNone: - return "none" - case TokenLit: - return "literal" - case TokenSep: - return "sep" - case TokenOp: - return "op" - case TokenWS: - return "ws" - case TokenNL: - return "newline" - case TokenComment: - return "comment" - case TokenComma: - return "comma" - default: - return "" - } -} - -// TokenType enums -const ( - TokenNone = TokenType(iota) - TokenLit - TokenSep - TokenComma - TokenOp - TokenWS - TokenNL - TokenComment -) - -type iniLexer struct{} - -// Tokenize will return a list of tokens during lexical analysis of the -// io.Reader. -func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) - } - - return l.tokenize(b) -} - -func (l *iniLexer) tokenize(b []byte) ([]Token, error) { - runes := bytes.Runes(b) - var err error - n := 0 - tokenAmount := countTokens(runes) - tokens := make([]Token, tokenAmount) - count := 0 - - for len(runes) > 0 && count < tokenAmount { - switch { - case isWhitespace(runes[0]): - tokens[count], n, err = newWSToken(runes) - case isComma(runes[0]): - tokens[count], n = newCommaToken(), 1 - case isComment(runes): - tokens[count], n, err = newCommentToken(runes) - case isNewline(runes): - tokens[count], n, err = newNewlineToken(runes) - case isSep(runes): - tokens[count], n, err = newSepToken(runes) - case isOp(runes): - tokens[count], n, err = newOpToken(runes) - default: - tokens[count], n, err = newLitToken(runes) - } - - if err != nil { - return nil, err - } - - count++ - - runes = runes[n:] - } - - return tokens[:count], nil -} - -func countTokens(runes []rune) int { - count, n := 0, 0 - var err error - - for len(runes) > 0 { - switch { - case isWhitespace(runes[0]): - _, n, err = newWSToken(runes) - case isComma(runes[0]): - _, n = newCommaToken(), 1 - case isComment(runes): - _, n, err = newCommentToken(runes) - case isNewline(runes): - _, n, err = newNewlineToken(runes) - case isSep(runes): - _, n, err = newSepToken(runes) - case isOp(runes): - _, n, err = newOpToken(runes) - default: - _, n, err = newLitToken(runes) - } - - if err != nil { - return 0 - } - - count++ - runes = runes[n:] - } - - return count + 1 -} - -// Token indicates a metadata about a given value. 
-type Token struct { - t TokenType - ValueType ValueType - base int - raw []rune -} - -var emptyValue = Value{} - -func newToken(t TokenType, raw []rune, v ValueType) Token { - return Token{ - t: t, - raw: raw, - ValueType: v, - } -} - -// Raw return the raw runes that were consumed -func (tok Token) Raw() []rune { - return tok.raw -} - -// Type returns the token type -func (tok Token) Type() TokenType { - return tok.t -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go deleted file mode 100644 index cf9fad81..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ /dev/null @@ -1,356 +0,0 @@ -package ini - -import ( - "fmt" - "io" -) - -// State enums for the parse table -const ( - InvalidState = iota - // stmt -> value stmt' - StatementState - // stmt' -> MarkComplete | op stmt - StatementPrimeState - // value -> number | string | boolean | quoted_string - ValueState - // section -> [ section' - OpenScopeState - // section' -> value section_close - SectionState - // section_close -> ] - CloseScopeState - // SkipState will skip (NL WS)+ - SkipState - // SkipTokenState will skip any token and push the previous - // state onto the stack. - SkipTokenState - // comment -> # comment' | ; comment' - // comment' -> MarkComplete | value - CommentState - // MarkComplete state will complete statements and move that - // to the completed AST list - MarkCompleteState - // TerminalState signifies that the tokens have been fully parsed - TerminalState -) - -// parseTable is a state machine to dictate the grammar above. -var parseTable = map[ASTKind]map[TokenType]int{ - ASTKindStart: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, - ASTKindCommentStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExpr: map[TokenType]int{ - TokenOp: StatementPrimeState, - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenWS: ValueState, - TokenNL: SkipState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindEqualExpr: map[TokenType]int{ - TokenLit: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - }, - ASTKindStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenSep: CloseScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExprStatement: map[TokenType]int{ - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenOp: ValueState, - TokenWS: ValueState, - TokenNL: MarkCompleteState, - TokenComment: CommentState, - TokenNone: TerminalState, - TokenComma: SkipState, - }, - ASTKindSectionStatement: map[TokenType]int{ - TokenLit: SectionState, - TokenOp: SectionState, - TokenSep: CloseScopeState, - TokenWS: SectionState, - TokenNL: SkipTokenState, - }, - ASTKindCompletedSectionStatement: map[TokenType]int{ - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindSkipStatement: map[TokenType]int{ - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: 
TerminalState, - }, -} - -// ParseAST will parse input from an io.Reader using -// an LL(1) parser. -func ParseAST(r io.Reader) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.Tokenize(r) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -// ParseASTBytes will parse input from a byte slice using -// an LL(1) parser. -func ParseASTBytes(b []byte) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.tokenize(b) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -func parse(tokens []Token) ([]AST, error) { - start := Start - stack := newParseStack(3, len(tokens)) - - stack.Push(start) - s := newSkipper() - -loop: - for stack.Len() > 0 { - k := stack.Pop() - - var tok Token - if len(tokens) == 0 { - // this occurs when all the tokens have been processed - // but reduction of what's left on the stack needs to - // occur. - tok = emptyToken - } else { - tok = tokens[0] - } - - step := parseTable[k.Kind][tok.Type()] - if s.ShouldSkip(tok) { - // being in a skip state with no tokens will break out of - // the parse loop since there is nothing left to process. - if len(tokens) == 0 { - break loop - } - // if should skip is true, we skip the tokens until should skip is set to false. - step = SkipTokenState - } - - switch step { - case TerminalState: - // Finished parsing. Push what should be the last - // statement to the stack. If there is anything left - // on the stack, an error in parsing has occurred. - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - break loop - case SkipTokenState: - // When skipping a token, the previous state was popped off the stack. - // To maintain the correct state, the previous state will be pushed - // onto the stack. - stack.Push(k) - case StatementState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - expr := newExpression(tok) - stack.Push(expr) - case StatementPrimeState: - if tok.Type() != TokenOp { - stack.MarkComplete(k) - continue - } - - if k.Kind != ASTKindExpr { - return nil, NewParseError( - fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), - ) - } - - k = trimSpaces(k) - expr := newEqualExpr(k, tok) - stack.Push(expr) - case ValueState: - // ValueState requires the previous state to either be an equal expression - // or an expression statement. - // - // This grammar occurs when the RHS is a number, word, or quoted string. - // equal_expr -> lit op equal_expr' - // equal_expr' -> number | string | quoted_string - // quoted_string -> " quoted_string' - // quoted_string' -> string quoted_string_end - // quoted_string_end -> " - // - // otherwise - // expr_stmt -> equal_expr (expr_stmt')* - // expr_stmt' -> ws S | op S | MarkComplete - // S -> equal_expr' expr_stmt' - switch k.Kind { - case ASTKindEqualExpr: - // assigning a value to some key - k.AppendChild(newExpression(tok)) - stack.Push(newExprStatement(k)) - case ASTKindExpr: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stack.Push(k) - case ASTKindExprStatement: - root := k.GetRoot() - children := root.GetChildren() - if len(children) == 0 { - return nil, NewParseError( - fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), - ) - } - - rhs := children[len(children)-1] - - if rhs.Root.ValueType != QuotedStringType { - rhs.Root.ValueType = StringType - rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
- - } - - children[len(children)-1] = rhs - k.SetChildren(children) - - stack.Push(k) - } - case OpenScopeState: - if !runeCompare(tok.Raw(), openBrace) { - return nil, NewParseError("expected '['") - } - // If OpenScopeState is not at the start, we must mark the previous ast as complete - // - // for example: if previous ast was a skip statement; - // we should mark it as complete before we create a new statement - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - stmt := newStatement() - stack.Push(stmt) - case CloseScopeState: - if !runeCompare(tok.Raw(), closeBrace) { - return nil, NewParseError("expected ']'") - } - - k = trimSpaces(k) - stack.Push(newCompletedSectionStatement(k)) - case SectionState: - var stmt AST - - switch k.Kind { - case ASTKindStatement: - // If there are multiple literals inside of a scope declaration, - // then the current token's raw value will be appended to the Name. - // - // This handles cases like [ profile default ] - // - // k will represent a SectionStatement with the children representing - // the label of the section - stmt = newSectionStatement(tok) - case ASTKindSectionStatement: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stmt = k - default: - return nil, NewParseError( - fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), - ) - } - - stack.Push(stmt) - case MarkCompleteState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - if stack.Len() == 0 { - stack.Push(start) - } - case SkipState: - stack.Push(newSkipStatement(k)) - s.Skip() - case CommentState: - if k.Kind == ASTKindStart { - stack.Push(k) - } else { - stack.MarkComplete(k) - } - - stmt := newCommentStatement(tok) - stack.Push(stmt) - default: - return nil, NewParseError( - fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", - k, tok.Type())) - } - - if len(tokens) > 0 { - tokens = tokens[1:] - } - } - - // this occurs when a statement has not been completed - if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) - } - - // returns a sublist which excludes the start symbol - return stack.List(), nil -} - -// trimSpaces will trim spaces on the left and right hand side of -// the literal. -func trimSpaces(k AST) AST { - // trim left hand side of spaces - for i := 0; i < len(k.Root.raw); i++ { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[1:] - i-- - } - - // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - } - - return k -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go deleted file mode 100644 index 24df543d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ /dev/null @@ -1,324 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - runesTrue = []rune("true") - runesFalse = []rune("false") -) - -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. 
A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. -func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - -func isValid(b []rune) (bool, int, error) { - if len(b) == 0 { - // TODO: should probably return an error - return false, 0, nil - } - - return isValidRune(b[0]), 1, nil -} - -func isValidRune(r rune) bool { - return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' -} - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" - case StringType: - return "STRING" - case BoolType: - return "BOOL" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - DecimalType - IntegerType - StringType - QuotedStringType - BoolType -) - -// Value is a union container -type Value struct { - Type ValueType - raw []rune - - integer int64 - decimal float64 - boolean bool - str string -} - -func newValue(t ValueType, base int, raw []rune) (Value, error) { - v := Value{ - Type: t, - raw: raw, - } - var err error - - switch t { - case DecimalType: - v.decimal, err = strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) - case StringType: - v.str = string(raw) - case QuotedStringType: - v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = runeCompare(v.raw, runesTrue) - } - - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err -} - -// Append will append values and change the type to a string -// type. -func (v *Value) Append(tok Token) { - r := tok.Raw() - if v.Type != QuotedStringType { - v.Type = StringType - r = tok.raw[1 : len(tok.raw)-1] - } - if tok.Type() != TokenLit { - v.raw = append(v.raw, tok.Raw()...) - } else { - v.raw = append(v.raw, r...) 
- } -} - -func (v Value) String() string { - switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) - case StringType: - return fmt.Sprintf("string: %s", string(v.raw)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) - default: - return "union not set" - } -} - -func newLitToken(b []rune) (Token, int, error) { - n := 0 - var err error - - token := Token{} - if b[0] == '"' { - n, err = getStringValue(b) - if err != nil { - return token, n, err - } - - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) - } else { - n, err = getValue(b) - token = newToken(TokenLit, b[:n], StringType) - } - - return token, n, err -} - -// IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer -} - -// FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal -} - -// BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean -} - -func isTrimmable(r rune) bool { - switch r { - case '\n', ' ': - return true - } - return false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - switch v.Type { - case StringType: - return strings.TrimFunc(string(v.raw), isTrimmable) - case QuotedStringType: - // preserve all characters in the quotes - return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) - default: - return strings.TrimFunc(string(v.raw), isTrimmable) - } -} - -func contains(runes []rune, c rune) bool { - for i := 0; i < len(runes); i++ { - if runes[i] == c { - return true - } - } - - return false -} - -func runeCompare(v1 []rune, v2 []rune) bool { - if len(v1) != len(v2) { - return false - } - - for i := 0; i < len(v1); i++ { - if v1[i] != v2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go deleted file mode 100644 index e52ac399..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go +++ /dev/null @@ -1,30 +0,0 @@ -package ini - -func isNewline(b []rune) bool { - if len(b) == 0 { - return false - } - - if b[0] == '\n' { - return true - } - - if len(b) < 2 { - return false - } - - return b[0] == '\r' && b[1] == '\n' -} - -func newNewlineToken(b []rune) (Token, int, error) { - i := 1 - if b[0] == '\r' && isNewline(b[1:]) { - i++ - } - - if !isNewline([]rune(b[:i])) { - return emptyToken, 0, NewParseError("invalid new line token") - } - - return newToken(TokenNL, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc5..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - exponent -) - -type 
numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. -type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go deleted file mode 100644 index 8a84c7cb..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go +++ /dev/null @@ -1,39 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - equalOp = []rune("=") - equalColonOp = []rune(":") -) - -func isOp(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '=': - return true - case ':': - return true - default: - return false - } -} - -func newOpToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '=': - tok = newToken(TokenOp, equalOp, NoneType) - case ':': - tok = newToken(TokenOp, equalColonOp, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go deleted file mode 100644 index 45728701..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go +++ /dev/null @@ -1,43 +0,0 @@ -package ini - -import "fmt" - -const ( - // ErrCodeParseError is returned when a parsing error - // has occurred. - ErrCodeParseError = "INIParseError" -) - -// ParseError is an error which is returned during any part of -// the parsing process. -type ParseError struct { - msg string -} - -// NewParseError will return a new ParseError where message -// is the description of the error. -func NewParseError(message string) *ParseError { - return &ParseError{ - msg: message, - } -} - -// Code will return the ErrCodeParseError -func (err *ParseError) Code() string { - return ErrCodeParseError -} - -// Message returns the error's message -func (err *ParseError) Message() string { - return err.msg -} - -// OrigError return nothing since there will never be any -// original error. -func (err *ParseError) OrigError() error { - return nil -} - -func (err *ParseError) Error() string { - return fmt.Sprintf("%s: %s", err.Code(), err.Message()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go deleted file mode 100644 index 7f01cf7c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go +++ /dev/null @@ -1,60 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" -) - -// ParseStack is a stack that contains a container, the stack portion, -// and the list which is the list of ASTs that have been successfully -// parsed. -type ParseStack struct { - top int - container []AST - list []AST - index int -} - -func newParseStack(sizeContainer, sizeList int) ParseStack { - return ParseStack{ - container: make([]AST, sizeContainer), - list: make([]AST, sizeList), - } -} - -// Pop will return and truncate the last container element. 
-func (s *ParseStack) Pop() AST { - s.top-- - return s.container[s.top] -} - -// Push will add the new AST to the container -func (s *ParseStack) Push(ast AST) { - s.container[s.top] = ast - s.top++ -} - -// MarkComplete will append the AST to the list of completed statements -func (s *ParseStack) MarkComplete(ast AST) { - s.list[s.index] = ast - s.index++ -} - -// List will return the completed statements -func (s ParseStack) List() []AST { - return s.list[:s.index] -} - -// Len will return the length of the container -func (s *ParseStack) Len() int { - return s.top -} - -func (s ParseStack) String() string { - buf := bytes.Buffer{} - for i, node := range s.list { - buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go deleted file mode 100644 index f82095ba..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go +++ /dev/null @@ -1,41 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - emptyRunes = []rune{} -) - -func isSep(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '[', ']': - return true - default: - return false - } -} - -var ( - openBrace = []rune("[") - closeBrace = []rune("]") -) - -func newSepToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '[': - tok = newToken(TokenSep, openBrace, NoneType) - case ']': - tok = newToken(TokenSep, closeBrace, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go deleted file mode 100644 index da7a4049..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ /dev/null @@ -1,45 +0,0 @@ -package ini - -// skipper is used to skip certain blocks of an ini file. -// Currently skipper is used to skip nested blocks of ini -// files. See example below -// -// [ foo ] -// nested = ; this section will be skipped -// a=b -// c=d -// bar=baz ; this will be included -type skipper struct { - shouldSkip bool - TokenSet bool - prevTok Token -} - -func newSkipper() skipper { - return skipper{ - prevTok: emptyToken, - } -} - -func (s *skipper) ShouldSkip(tok Token) bool { - // should skip state will be modified only if previous token was new line (NL); - // and the current token is not WhiteSpace (WS). - if s.shouldSkip && - s.prevTok.Type() == TokenNL && - tok.Type() != TokenWS { - s.Continue() - return false - } - s.prevTok = tok - return s.shouldSkip -} - -func (s *skipper) Skip() { - s.shouldSkip = true -} - -func (s *skipper) Continue() { - s.shouldSkip = false - // empty token is assigned as we return to default state, when should skip is false - s.prevTok = emptyToken -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go deleted file mode 100644 index 18f3fe89..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// Statement is an empty AST mostly used for transitioning states. 
-func newStatement() AST { - return newAST(ASTKindStatement, AST{}) -} - -// SectionStatement represents a section AST -func newSectionStatement(tok Token) AST { - return newASTWithRootToken(ASTKindSectionStatement, tok) -} - -// ExprStatement represents a completed expression AST -func newExprStatement(ast AST) AST { - return newAST(ASTKindExprStatement, ast) -} - -// CommentStatement represents a comment in the ini definition. -// -// grammar: -// comment -> #comment' | ;comment' -// comment' -> epsilon | value -func newCommentStatement(tok Token) AST { - return newAST(ASTKindCommentStatement, newExpression(tok)) -} - -// CompletedSectionStatement represents a completed section -func newCompletedSectionStatement(ast AST) AST { - return newAST(ASTKindCompletedSectionStatement, ast) -} - -// SkipStatement is used to skip whole statements -func newSkipStatement(ast AST) AST { - return newAST(ASTKindSkipStatement, ast) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go deleted file mode 100644 index 305999d2..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go +++ /dev/null @@ -1,284 +0,0 @@ -package ini - -import ( - "fmt" -) - -// getStringValue will return a quoted string and the amount -// of bytes read -// -// an error will be returned if the string is not properly formatted -func getStringValue(b []rune) (int, error) { - if b[0] != '"' { - return 0, NewParseError("strings must start with '\"'") - } - - endQuote := false - i := 1 - - for ; i < len(b) && !endQuote; i++ { - if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { - endQuote = true - break - } else if escaped { - /*c, err := getEscapedByte(b[i]) - if err != nil { - return 0, err - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) 
- i--*/ - - continue - } - } - - if !endQuote { - return 0, NewParseError("missing '\"' in string value") - } - - return i + 1, nil -} - -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -func getValue(b []rune) (int, error) { - i := 0 - - for i < len(b) { - if isNewline(b[i:]) { - break - } - - if isOp(b[i:]) { - break - } - - valid, n, err := isValid(b[i:]) - if err != nil { - return 0, err - } - - if !valid { - break - } - - i += n - } - - return i, nil -} - -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. 
-func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - -// isEscaped will return whether or not the character is an escaped -// character. -func isEscaped(value []rune, b rune) bool { - if len(value) == 0 { - return false - } - - switch b { - case '\'': // single quote - case '"': // quote - case 'n': // newline - case 't': // tab - case '\\': // backslash - default: - return false - } - - return value[len(value)-1] == '\\' -} - -func getEscapedByte(b rune) (rune, error) { - switch b { - case '\'': // single quote - return '\'', nil - case '"': // quote - return '"', nil - case 'n': // newline - return '\n', nil - case 't': // table - return '\t', nil - case '\\': // backslash - return '\\', nil - default: - return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) - } -} - -func removeEscapedCharacters(b []rune) []rune { - for i := 0; i < len(b); i++ { - if isEscaped(b[:i], b[i]) { - c, err := getEscapedByte(b[i]) - if err != nil { - return b - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i-- - } - } - - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go deleted file mode 100644 index 94841c32..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ /dev/null @@ -1,166 +0,0 @@ -package ini - -import ( - "fmt" - "sort" -) - -// Visitor is an interface used by walkers that will -// traverse an array of ASTs. -type Visitor interface { - VisitExpr(AST) error - VisitStatement(AST) error -} - -// DefaultVisitor is used to visit statements and expressions -// and ensure that they are both of the correct format. -// In addition, upon visiting this will build sections and populate -// the Sections field which can be used to retrieve profile -// configuration. -type DefaultVisitor struct { - scope string - Sections Sections -} - -// NewDefaultVisitor return a DefaultVisitor -func NewDefaultVisitor() *DefaultVisitor { - return &DefaultVisitor{ - Sections: Sections{ - container: map[string]Section{}, - }, - } -} - -// VisitExpr visits expressions... -func (v *DefaultVisitor) VisitExpr(expr AST) error { - t := v.Sections.container[v.scope] - if t.values == nil { - t.values = values{} - } - - switch expr.Kind { - case ASTKindExprStatement: - opExpr := expr.GetRoot() - switch opExpr.Kind { - case ASTKindEqualExpr: - children := opExpr.GetChildren() - if len(children) <= 1 { - return NewParseError("unexpected token type") - } - - rhs := children[1] - - if rhs.Root.Type() != TokenLit { - return NewParseError("unexpected token type") - } - - key := EqualExprKey(opExpr) - v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) - if err != nil { - return err - } - - t.values[key] = v - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - - v.Sections.container[v.scope] = t - return nil -} - -// VisitStatement visits statements... 
-func (v *DefaultVisitor) VisitStatement(stmt AST) error { - switch stmt.Kind { - case ASTKindCompletedSectionStatement: - child := stmt.GetRoot() - if child.Kind != ASTKindSectionStatement { - return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) - } - - name := string(child.Root.Raw()) - v.Sections.container[name] = Section{} - v.scope = name - default: - return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) - } - - return nil -} - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - Name string - values values -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) bool { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) int64 { - return t.values[k].IntValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) float64 { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go deleted file mode 100644 index 99915f7f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go +++ /dev/null @@ -1,25 +0,0 @@ -package ini - -// Walk will traverse the AST using the v, the Visitor. -func Walk(tree []AST, v Visitor) error { - for _, node := range tree { - switch node.Kind { - case ASTKindExpr, - ASTKindExprStatement: - - if err := v.VisitExpr(node); err != nil { - return err - } - case ASTKindStatement, - ASTKindCompletedSectionStatement, - ASTKindNestedSectionStatement, - ASTKindCompletedNestedSectionStatement: - - if err := v.VisitStatement(node); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go deleted file mode 100644 index 7ffb4ae0..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -import ( - "unicode" -) - -// isWhitespace will return whether or not the character is -// a whitespace character. -// -// Whitespace is defined as a space or tab. 
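The `Sections`/`Section` accessors above are how the SDK exposes parsed shared-config profiles. A usage sketch, noting that `internal/ini` can only be imported from inside the `aws-sdk-go` module itself (or from a copy of the package), and that the profile content here is invented:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	src := []byte("[profile dev]\nregion = us-west-2\nmax_retries = 3\n")

	sections, err := ini.ParseBytes(src)
	if err != nil {
		panic(err)
	}

	// Section names keep the "profile " prefix exactly as written in the file.
	section, ok := sections.GetSection("profile dev")
	if !ok {
		panic("profile not found")
	}

	fmt.Println(section.String("region"))   // us-west-2
	fmt.Println(section.Int("max_retries")) // 3
}
```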
-func isWhitespace(c rune) bool { - return unicode.IsSpace(c) && c != '\n' && c != '\r' -} - -func newWSToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if !isWhitespace(b[i]) { - break - } - } - - return newToken(TokenWS, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go deleted file mode 100644 index 0b9b0dfc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go +++ /dev/null @@ -1,57 +0,0 @@ -package s3err - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RequestFailure provides additional S3 specific metadata for the request -// failure. -type RequestFailure struct { - awserr.RequestFailure - - hostID string -} - -// NewRequestFailure returns a request failure error decordated with S3 -// specific metadata. -func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure { - return &RequestFailure{RequestFailure: err, hostID: hostID} -} - -func (r RequestFailure) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", - r.StatusCode(), r.RequestID(), r.hostID) - return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} -func (r RequestFailure) String() string { - return r.Error() -} - -// HostID returns the HostID request response value. -func (r RequestFailure) HostID() string { - return r.hostID -} - -// RequestFailureWrapperHandler returns a handler to rap an -// awserr.RequestFailure with the S3 request ID 2 from the response. -func RequestFailureWrapperHandler() request.NamedHandler { - return request.NamedHandler{ - Name: "awssdk.s3.errorHandler", - Fn: func(req *request.Request) { - reqErr, ok := req.Error.(awserr.RequestFailure) - if !ok || reqErr == nil { - return - } - - hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2") - if req.Error == nil { - return - } - - req.Error = NewRequestFailure(reqErr, hostID) - }, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go deleted file mode 100644 index 6c443988..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go deleted file mode 100644 index 5aa9137e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !go1.7 - -package sdkio - -// Copy of Go 1.7 io package's Seeker constants. 
-const ( - SeekStart = 0 // seek relative to the origin of the file - SeekCurrent = 1 // seek relative to the current offset - SeekEnd = 2 // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go deleted file mode 100644 index e5f00561..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.7 - -package sdkio - -import "io" - -// Alias for Go 1.7 io package Seeker constants -const ( - SeekStart = io.SeekStart // seek relative to the origin of the file - SeekCurrent = io.SeekCurrent // seek relative to the current offset - SeekEnd = io.SeekEnd // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go deleted file mode 100644 index 44898eed..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.10 - -package sdkmath - -import "math" - -// Round returns the nearest integer, rounding half away from zero. -// -// Special cases are: -// Round(±0) = ±0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN -func Round(x float64) float64 { - return math.Round(x) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go deleted file mode 100644 index 810ec7f0..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build !go1.10 - -package sdkmath - -import "math" - -// Copied from the Go standard library's (Go 1.12) math/floor.go for use in -// Go version prior to Go 1.10. -const ( - uvone = 0x3FF0000000000000 - mask = 0x7FF - shift = 64 - 11 - 1 - bias = 1023 - signMask = 1 << 63 - fracMask = 1<= 0.5 { - // return t + Copysign(1, x) - // } - // return t - // } - bits := math.Float64bits(x) - e := uint(bits>>shift) & mask - if e < bias { - // Round abs(x) < 1 including denormals. - bits &= signMask // +-0 - if e == bias-1 { - bits |= uvone // +-1 - } - } else if e < bias+shift { - // Round any abs(x) >= 1 containing a fractional component [0,1). - // - // Numbers with larger exponents are returned unchanged since they - // must be either an integer, infinity, or NaN. 
- const half = 1 << (shift - 1) - e -= bias - bits += half >> e - bits &^= fracMask >> e - } - return math.Float64frombits(bits) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go deleted file mode 100644 index 0c9802d8..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go +++ /dev/null @@ -1,29 +0,0 @@ -package sdkrand - -import ( - "math/rand" - "sync" - "time" -) - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// SeededRand is a new RNG using a thread safe implementation of rand.Source -var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go deleted file mode 100644 index f4651da2..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.6 - -package sdkrand - -import "math/rand" - -// Read provides the stub for math.Rand.Read method support for go version's -// 1.6 and greater. -func Read(r *rand.Rand, p []byte) (int, error) { - return r.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go deleted file mode 100644 index b1d93a33..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.6 - -package sdkrand - -import "math/rand" - -// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 -func Read(r *rand.Rand, p []byte) (n int, err error) { - // Copy of Go standard libraries math package's read function not added to - // standard library until Go 1.6. - var pos int8 - var val int64 - for n = 0; n < len(p); n++ { - if pos == 0 { - val = r.Int63() - pos = 7 - } - p[n] = byte(val) - val >>= 8 - pos-- - } - - return n, err -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go deleted file mode 100644 index 38ea61af..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go +++ /dev/null @@ -1,23 +0,0 @@ -package sdkuri - -import ( - "path" - "strings" -) - -// PathJoin will join the elements of the path delimited by the "/" -// character. Similar to path.Join with the exception the trailing "/" -// character is preserved if present. -func PathJoin(elems ...string) string { - if len(elems) == 0 { - return "" - } - - hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") - str := path.Join(elems...) - if hasTrailing && str != "/" { - str += "/" - } - - return str -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go deleted file mode 100644 index 7da8a49c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go +++ /dev/null @@ -1,12 +0,0 @@ -package shareddefaults - -const ( - // ECSCredsProviderEnvVar is an environmental variable key used to - // determine which path needs to be hit. 
- ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" -) - -// ECSContainerCredentialsURI is the endpoint to retrieve container -// credentials. This can be overridden to test to ensure the credential process -// is behaving correctly. -var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go deleted file mode 100644 index ebcbc2b4..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package shareddefaults - -import ( - "os" - "path/filepath" - "runtime" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") - } - - // *nix - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go deleted file mode 100644 index d008ae27..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. 
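For reference, a small sketch of the trailing-slash behavior documented for the sdkuri.PathJoin helper removed above, contrasted with the standard library's path.Join. The pathJoin function below is a standalone copy for illustration only:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// pathJoin mirrors the deleted sdkuri.PathJoin: like path.Join, but it keeps a
// trailing "/" when the last element has one (path.Join would strip it).
func pathJoin(elems ...string) string {
	if len(elems) == 0 {
		return ""
	}
	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
	joined := path.Join(elems...)
	if hasTrailing && joined != "/" {
		joined += "/"
	}
	return joined
}

func main() {
	fmt.Println(path.Join("latest", "meta-data/")) // "latest/meta-data"
	fmt.Println(pathJoin("latest", "meta-data/"))  // "latest/meta-data/"
}
```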
-func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go deleted file mode 100644 index 15105497..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go +++ /dev/null @@ -1,144 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "strconv" -) - -type decodedMessage struct { - rawMessage - Headers decodedHeaders `json:"headers"` -} -type jsonMessage struct { - Length json.Number `json:"total_length"` - HeadersLen json.Number `json:"headers_length"` - PreludeCRC json.Number `json:"prelude_crc"` - Headers decodedHeaders `json:"headers"` - Payload []byte `json:"payload"` - CRC json.Number `json:"message_crc"` -} - -func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { - var jsonMsg jsonMessage - if err = json.Unmarshal(b, &jsonMsg); err != nil { - return err - } - - d.Length, err = numAsUint32(jsonMsg.Length) - if err != nil { - return err - } - d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) - if err != nil { - return err - } - d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) - if err != nil { - return err - } - d.Headers = jsonMsg.Headers - d.Payload = jsonMsg.Payload - d.CRC, err = numAsUint32(jsonMsg.CRC) - if err != nil { - return err - } - - return nil -} - -func (d *decodedMessage) MarshalJSON() ([]byte, error) { - jsonMsg := jsonMessage{ - Length: json.Number(strconv.Itoa(int(d.Length))), - HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), - PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), - Headers: d.Headers, - Payload: d.Payload, - CRC: json.Number(strconv.Itoa(int(d.CRC))), - } - - return json.Marshal(jsonMsg) -} - -func numAsUint32(n json.Number) (uint32, error) { - v, err := n.Int64() - if err != nil { - return 0, fmt.Errorf("failed to get int64 json number, %v", err) - } - - return uint32(v), nil -} - -func (d decodedMessage) Message() Message { - return Message{ - Headers: Headers(d.Headers), - Payload: d.Payload, - } -} - -type decodedHeaders Headers - -func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { - var jsonHeaders []struct { - Name string `json:"name"` - Type valueType `json:"type"` - Value interface{} `json:"value"` - } - - decoder := json.NewDecoder(bytes.NewReader(b)) - decoder.UseNumber() - if err := decoder.Decode(&jsonHeaders); err != nil { - return err - } - - var headers Headers - for _, h := range jsonHeaders { - value, err := valueFromType(h.Type, h.Value) - if err != nil { - return err - } - headers.Set(h.Name, value) - } - *hs = decodedHeaders(headers) - - return nil -} - -func valueFromType(typ valueType, val interface{}) (Value, error) { - switch typ { - case trueValueType: - return BoolValue(true), nil - case falseValueType: - return BoolValue(false), nil - case int8ValueType: - v, err := val.(json.Number).Int64() - return Int8Value(int8(v)), err - case int16ValueType: - v, err := val.(json.Number).Int64() - return Int16Value(int16(v)), err - case int32ValueType: - v, err := val.(json.Number).Int64() - return Int32Value(int32(v)), err - case int64ValueType: - v, err := val.(json.Number).Int64() - return Int64Value(v), err - case bytesValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return BytesValue(v), err - case stringValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return 
StringValue(string(v)), err - case timestampValueType: - v, err := val.(json.Number).Int64() - return TimestampValue(timeFromEpochMilli(v)), err - case uuidValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - var tv UUIDValue - copy(tv[:], v) - return tv, err - default: - panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go deleted file mode 100644 index 47433939..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go +++ /dev/null @@ -1,216 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "hash/crc32" - "io" - - "github.com/aws/aws-sdk-go/aws" -) - -// Decoder provides decoding of an Event Stream messages. -type Decoder struct { - r io.Reader - logger aws.Logger -} - -// NewDecoder initializes and returns a Decoder for decoding event -// stream messages from the reader provided. -func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { - d := &Decoder{ - r: r, - } - - for _, opt := range opts { - opt(d) - } - - return d -} - -// DecodeWithLogger adds a logger to be used by the decoder when decoding -// stream events. -func DecodeWithLogger(logger aws.Logger) func(*Decoder) { - return func(d *Decoder) { - d.logger = logger - } -} - -// Decode attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if Decode fails to read -// the message from the stream. -func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { - reader := d.r - if d.logger != nil { - debugMsgBuf := bytes.NewBuffer(nil) - reader = io.TeeReader(reader, debugMsgBuf) - defer func() { - logMessageDecode(d.logger, debugMsgBuf, m, err) - }() - } - - m, err = Decode(reader, payloadBuf) - - return m, err -} - -// Decode attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if Decode fails to read -// the message from the reader. 
-func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { - crc := crc32.New(crc32IEEETable) - hashReader := io.TeeReader(reader, crc) - - prelude, err := decodePrelude(hashReader, crc) - if err != nil { - return Message{}, err - } - - if prelude.HeadersLen > 0 { - lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) - m.Headers, err = decodeHeaders(lr) - if err != nil { - return Message{}, err - } - } - - if payloadLen := prelude.PayloadLen(); payloadLen > 0 { - buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) - if err != nil { - return Message{}, err - } - m.Payload = buf - } - - msgCRC := crc.Sum32() - if err := validateCRC(reader, msgCRC); err != nil { - return Message{}, err - } - - return m, nil -} - -func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Log(w.String()) }() - - fmt.Fprintf(w, "Raw message:\n%s\n", - hex.Dump(msgBuf.Bytes())) - - if decodeErr != nil { - fmt.Fprintf(w, "Decode error: %v\n", decodeErr) - return - } - - rawMsg, err := msg.rawMessage() - if err != nil { - fmt.Fprintf(w, "failed to create raw message, %v\n", err) - return - } - - decodedMsg := decodedMessage{ - rawMessage: rawMsg, - Headers: decodedHeaders(msg.Headers), - } - - fmt.Fprintf(w, "Decoded message:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(decodedMsg); err != nil { - fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) - } -} - -func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { - var p messagePrelude - - var err error - p.Length, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - p.HeadersLen, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - if err := p.ValidateLens(); err != nil { - return messagePrelude{}, err - } - - preludeCRC := crc.Sum32() - if err := validateCRC(r, preludeCRC); err != nil { - return messagePrelude{}, err - } - - p.PreludeCRC = preludeCRC - - return p, nil -} - -func decodePayload(buf []byte, r io.Reader) ([]byte, error) { - w := bytes.NewBuffer(buf[0:0]) - - _, err := io.Copy(w, r) - return w.Bytes(), err -} - -func decodeUint8(r io.Reader) (uint8, error) { - type byteReader interface { - ReadByte() (byte, error) - } - - if br, ok := r.(byteReader); ok { - v, err := br.ReadByte() - return uint8(v), err - } - - var b [1]byte - _, err := io.ReadFull(r, b[:]) - return uint8(b[0]), err -} -func decodeUint16(r io.Reader) (uint16, error) { - var b [2]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint16(bs), nil -} -func decodeUint32(r io.Reader) (uint32, error) { - var b [4]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(bs), nil -} -func decodeUint64(r io.Reader) (uint64, error) { - var b [8]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint64(bs), nil -} - -func validateCRC(r io.Reader, expect uint32) error { - msgCRC, err := decodeUint32(r) - if err != nil { - return err - } - - if msgCRC != expect { - return ChecksumError{} - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go deleted file mode 100644 index ffade3bc..00000000 --- 
a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go +++ /dev/null @@ -1,162 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "hash/crc32" - "io" - - "github.com/aws/aws-sdk-go/aws" -) - -// Encoder provides EventStream message encoding. -type Encoder struct { - w io.Writer - logger aws.Logger - - headersBuf *bytes.Buffer -} - -// NewEncoder initializes and returns an Encoder to encode Event Stream -// messages to an io.Writer. -func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { - e := &Encoder{ - w: w, - headersBuf: bytes.NewBuffer(nil), - } - - for _, opt := range opts { - opt(e) - } - - return e -} - -// EncodeWithLogger adds a logger to be used by the encode when decoding -// stream events. -func EncodeWithLogger(logger aws.Logger) func(*Encoder) { - return func(d *Encoder) { - d.logger = logger - } -} - -// Encode encodes a single EventStream message to the io.Writer the Encoder -// was created with. An error is returned if writing the message fails. -func (e *Encoder) Encode(msg Message) (err error) { - e.headersBuf.Reset() - - writer := e.w - if e.logger != nil { - encodeMsgBuf := bytes.NewBuffer(nil) - writer = io.MultiWriter(writer, encodeMsgBuf) - defer func() { - logMessageEncode(e.logger, encodeMsgBuf, msg, err) - }() - } - - if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { - return err - } - - crc := crc32.New(crc32IEEETable) - hashWriter := io.MultiWriter(writer, crc) - - headersLen := uint32(e.headersBuf.Len()) - payloadLen := uint32(len(msg.Payload)) - - if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { - return err - } - - if headersLen > 0 { - if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { - return err - } - } - - if payloadLen > 0 { - if _, err = hashWriter.Write(msg.Payload); err != nil { - return err - } - } - - msgCRC := crc.Sum32() - return binary.Write(writer, binary.BigEndian, msgCRC) -} - -func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Log(w.String()) }() - - fmt.Fprintf(w, "Message to encode:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(msg); err != nil { - fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) - } - - if encodeErr != nil { - fmt.Fprintf(w, "Encode error: %v\n", encodeErr) - return - } - - fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) -} - -func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { - p := messagePrelude{ - Length: minMsgLen + headersLen + payloadLen, - HeadersLen: headersLen, - } - if err := p.ValidateLens(); err != nil { - return err - } - - err := binaryWriteFields(w, binary.BigEndian, - p.Length, - p.HeadersLen, - ) - if err != nil { - return err - } - - p.PreludeCRC = crc.Sum32() - err = binary.Write(w, binary.BigEndian, p.PreludeCRC) - if err != nil { - return err - } - - return nil -} - -// EncodeHeaders writes the header values to the writer encoded in the event -// stream format. Returns an error if a header fails to encode. 
-func EncodeHeaders(w io.Writer, headers Headers) error { - for _, h := range headers { - hn := headerName{ - Len: uint8(len(h.Name)), - } - copy(hn.Name[:hn.Len], h.Name) - if err := hn.encode(w); err != nil { - return err - } - - if err := h.Value.encode(w); err != nil { - return err - } - } - - return nil -} - -func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { - for _, v := range vs { - if err := binary.Write(w, order, v); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go deleted file mode 100644 index 5481ef30..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go +++ /dev/null @@ -1,23 +0,0 @@ -package eventstream - -import "fmt" - -// LengthError provides the error for items being larger than a maximum length. -type LengthError struct { - Part string - Want int - Have int - Value interface{} -} - -func (e LengthError) Error() string { - return fmt.Sprintf("%s length invalid, %d/%d, %v", - e.Part, e.Want, e.Have, e.Value) -} - -// ChecksumError provides the error for message checksum invalidation errors. -type ChecksumError struct{} - -func (e ChecksumError) Error() string { - return "message checksum mismatch" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go deleted file mode 100644 index 34c2e89d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go +++ /dev/null @@ -1,77 +0,0 @@ -package eventstreamapi - -import ( - "fmt" - "sync" -) - -type messageError struct { - code string - msg string -} - -func (e messageError) Code() string { - return e.code -} - -func (e messageError) Message() string { - return e.msg -} - -func (e messageError) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.msg) -} - -func (e messageError) OrigErr() error { - return nil -} - -// OnceError wraps the behavior of recording an error -// once and signal on a channel when this has occurred. -// Signaling is done by closing of the channel. -// -// Type is safe for concurrent usage. -type OnceError struct { - mu sync.RWMutex - err error - ch chan struct{} -} - -// NewOnceError return a new OnceError -func NewOnceError() *OnceError { - return &OnceError{ - ch: make(chan struct{}, 1), - } -} - -// Err acquires a read-lock and returns an -// error if one has been set. -func (e *OnceError) Err() error { - e.mu.RLock() - err := e.err - e.mu.RUnlock() - - return err -} - -// SetError acquires a write-lock and will set -// the underlying error value if one has not been set. -func (e *OnceError) SetError(err error) { - if err == nil { - return - } - - e.mu.Lock() - if e.err == nil { - e.err = err - close(e.ch) - } - e.mu.Unlock() -} - -// ErrorSet returns a channel that will be used to signal -// that an error has been set. This channel will be closed -// when the error value has been set for OnceError. 
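For reference, the Decoder and Encoder removed above form a symmetric pair: Encode writes the prelude, headers, payload, and CRCs, and Decode reads them back while validating both checksums. A minimal round-trip sketch using only the exported eventstream API shown in this diff (the import path is the one being dropped from the vendor tree):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var buf bytes.Buffer

	// Encode one message; ":message-type" matches the eventstreamapi
	// MessageTypeHeader constant defined later in this diff.
	msg := eventstream.Message{Payload: []byte(`{"hello":"world"}`)}
	msg.Headers.Set(":message-type", eventstream.StringValue("event"))
	if err := eventstream.NewEncoder(&buf).Encode(msg); err != nil {
		panic(err)
	}

	// Decode it back; Decode validates both the prelude and message CRCs.
	out, err := eventstream.NewDecoder(&buf).Decode(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Headers.Get(":message-type"), string(out.Payload))
}
```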
-func (e *OnceError) ErrorSet() <-chan struct{} { - return e.ch -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go deleted file mode 100644 index bb8ea5da..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go +++ /dev/null @@ -1,160 +0,0 @@ -package eventstreamapi - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/eventstream" -) - -// Unmarshaler provides the interface for unmarshaling a EventStream -// message into a SDK type. -type Unmarshaler interface { - UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error -} - -// EventReader provides reading from the EventStream of an reader. -type EventReader struct { - decoder *eventstream.Decoder - - unmarshalerForEventType func(string) (Unmarshaler, error) - payloadUnmarshaler protocol.PayloadUnmarshaler - - payloadBuf []byte -} - -// NewEventReader returns a EventReader built from the reader and unmarshaler -// provided. Use ReadStream method to start reading from the EventStream. -func NewEventReader( - decoder *eventstream.Decoder, - payloadUnmarshaler protocol.PayloadUnmarshaler, - unmarshalerForEventType func(string) (Unmarshaler, error), -) *EventReader { - return &EventReader{ - decoder: decoder, - payloadUnmarshaler: payloadUnmarshaler, - unmarshalerForEventType: unmarshalerForEventType, - payloadBuf: make([]byte, 10*1024), - } -} - -// ReadEvent attempts to read a message from the EventStream and return the -// unmarshaled event value that the message is for. -// -// For EventStream API errors check if the returned error satisfies the -// awserr.Error interface to get the error's Code and Message components. -// -// EventUnmarshalers called with EventStream messages must take copies of the -// message's Payload. The payload will is reused between events read. -func (r *EventReader) ReadEvent() (event interface{}, err error) { - msg, err := r.decoder.Decode(r.payloadBuf) - if err != nil { - return nil, err - } - defer func() { - // Reclaim payload buffer for next message read. 
- r.payloadBuf = msg.Payload[0:0] - }() - - typ, err := GetHeaderString(msg, MessageTypeHeader) - if err != nil { - return nil, err - } - - switch typ { - case EventMessageType: - return r.unmarshalEventMessage(msg) - case ExceptionMessageType: - return nil, r.unmarshalEventException(msg) - case ErrorMessageType: - return nil, r.unmarshalErrorMessage(msg) - default: - return nil, fmt.Errorf("unknown eventstream message type, %v", typ) - } -} - -func (r *EventReader) unmarshalEventMessage( - msg eventstream.Message, -) (event interface{}, err error) { - eventType, err := GetHeaderString(msg, EventTypeHeader) - if err != nil { - return nil, err - } - - ev, err := r.unmarshalerForEventType(eventType) - if err != nil { - return nil, err - } - - err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) - if err != nil { - return nil, err - } - - return ev, nil -} - -func (r *EventReader) unmarshalEventException( - msg eventstream.Message, -) (err error) { - eventType, err := GetHeaderString(msg, ExceptionTypeHeader) - if err != nil { - return err - } - - ev, err := r.unmarshalerForEventType(eventType) - if err != nil { - return err - } - - err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) - if err != nil { - return err - } - - var ok bool - err, ok = ev.(error) - if !ok { - err = messageError{ - code: "SerializationError", - msg: fmt.Sprintf( - "event stream exception %s mapped to non-error %T, %v", - eventType, ev, ev, - ), - } - } - - return err -} - -func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { - var msgErr messageError - - msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) - if err != nil { - return err - } - - msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) - if err != nil { - return err - } - - return msgErr -} - -// GetHeaderString returns the value of the header as a string. If the header -// is not set or the value is not a string an error will be returned. -func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { - headerVal := msg.Headers.Get(headerName) - if headerVal == nil { - return "", fmt.Errorf("error header %s not present", headerName) - } - - v, ok := headerVal.Get().(string) - if !ok { - return "", fmt.Errorf("error header value is not a string, %T", headerVal) - } - - return v, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go deleted file mode 100644 index e46b8acc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go +++ /dev/null @@ -1,23 +0,0 @@ -package eventstreamapi - -// EventStream headers with specific meaning to async API functionality. -const ( - ChunkSignatureHeader = `:chunk-signature` // chunk signature for message - DateHeader = `:date` // Date header for signature - - // Message header and values - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
- - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go deleted file mode 100644 index 3a7ba5cd..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go +++ /dev/null @@ -1,123 +0,0 @@ -package eventstreamapi - -import ( - "bytes" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol/eventstream" -) - -var timeNow = time.Now - -// StreamSigner defines an interface for the implementation of signing of event stream payloads -type StreamSigner interface { - GetSignature(headers, payload []byte, date time.Time) ([]byte, error) -} - -// SignEncoder envelopes event stream messages -// into an event stream message payload with included -// signature headers using the provided signer and encoder. -type SignEncoder struct { - signer StreamSigner - encoder Encoder - bufEncoder *BufferEncoder - - closeErr error - closed bool -} - -// NewSignEncoder returns a new SignEncoder using the provided stream signer and -// event stream encoder. -func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { - // TODO: Need to pass down logging - - return &SignEncoder{ - signer: signer, - encoder: encoder, - bufEncoder: NewBufferEncoder(), - } -} - -// Close encodes a final event stream signing envelope with an empty event stream -// payload. This final end-frame is used to mark the conclusion of the stream. -func (s *SignEncoder) Close() error { - if s.closed { - return s.closeErr - } - - if err := s.encode([]byte{}); err != nil { - if strings.Contains(err.Error(), "on closed pipe") { - return nil - } - - s.closeErr = err - s.closed = true - return s.closeErr - } - - return nil -} - -// Encode takes the provided message and add envelopes the message -// with the required signature. -func (s *SignEncoder) Encode(msg eventstream.Message) error { - payload, err := s.bufEncoder.Encode(msg) - if err != nil { - return err - } - - return s.encode(payload) -} - -func (s SignEncoder) encode(payload []byte) error { - date := timeNow() - - var msg eventstream.Message - msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) - msg.Payload = payload - - var headers bytes.Buffer - if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { - return err - } - - sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) - if err != nil { - return err - } - - msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) - - return s.encoder.Encode(msg) -} - -// BufferEncoder is a utility that provides a buffered -// event stream encoder -type BufferEncoder struct { - encoder Encoder - buffer *bytes.Buffer -} - -// NewBufferEncoder returns a new BufferEncoder initialized -// with a 1024 byte buffer. -func NewBufferEncoder() *BufferEncoder { - buf := bytes.NewBuffer(make([]byte, 1024)) - return &BufferEncoder{ - encoder: eventstream.NewEncoder(buf), - buffer: buf, - } -} - -// Encode returns the encoded message as a byte slice. -// The returned byte slice will be modified on the next encode call -// and should not be held onto. 
-func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { - e.buffer.Reset() - - if err := e.encoder.Encode(msg); err != nil { - return nil, err - } - - return e.buffer.Bytes(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go deleted file mode 100644 index 433bb163..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go +++ /dev/null @@ -1,129 +0,0 @@ -package eventstreamapi - -import ( - "fmt" - "io" - "sync" - - "github.com/aws/aws-sdk-go/aws" -) - -// StreamWriter provides concurrent safe writing to an event stream. -type StreamWriter struct { - eventWriter *EventWriter - stream chan eventWriteAsyncReport - - done chan struct{} - closeOnce sync.Once - err *OnceError - - streamCloser io.Closer -} - -// NewStreamWriter returns a StreamWriter for the event writer, and stream -// closer provided. -func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { - w := &StreamWriter{ - eventWriter: eventWriter, - streamCloser: streamCloser, - stream: make(chan eventWriteAsyncReport), - done: make(chan struct{}), - err: NewOnceError(), - } - go w.writeStream() - - return w -} - -// Close terminates the writers ability to write new events to the stream. Any -// future call to Send will fail with an error. -func (w *StreamWriter) Close() error { - w.closeOnce.Do(w.safeClose) - return w.Err() -} - -func (w *StreamWriter) safeClose() { - close(w.done) -} - -// ErrorSet returns a channel which will be closed -// if an error occurs. -func (w *StreamWriter) ErrorSet() <-chan struct{} { - return w.err.ErrorSet() -} - -// Err returns any error that occurred while attempting to write an event to the -// stream. -func (w *StreamWriter) Err() error { - return w.err.Err() -} - -// Send writes a single event to the stream returning an error if the write -// failed. -// -// Send may be called concurrently. Events will be written to the stream -// safely. 
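For reference, a sketch of how the SignEncoder removed above envelopes events: each inner message is buffered, signed, and re-emitted as a frame whose headers carry the :date and :chunk-signature values. fakeSigner is an illustrative stand-in for the real SigV4 stream signer, not SDK code:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

// fakeSigner satisfies StreamSigner for illustration only; a real client
// would use the SigV4 streaming signer.
type fakeSigner struct{}

func (fakeSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
	sum := sha256.Sum256(append(headers, payload...))
	return sum[:], nil
}

func main() {
	var buf bytes.Buffer

	// Each signed frame wraps the encoded inner event as its payload and adds
	// the :date and :chunk-signature headers.
	enc := eventstreamapi.NewSignEncoder(fakeSigner{}, eventstream.NewEncoder(&buf))

	inner := eventstream.Message{Payload: []byte("hello")}
	if err := enc.Encode(inner); err != nil {
		panic(err)
	}

	// Close writes the empty end-frame that terminates the signed stream.
	if err := enc.Close(); err != nil {
		panic(err)
	}
}
```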
-func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { - if err := w.Err(); err != nil { - return err - } - - resultCh := make(chan error) - wrapped := eventWriteAsyncReport{ - Event: event, - Result: resultCh, - } - - select { - case w.stream <- wrapped: - case <-ctx.Done(): - return ctx.Err() - case <-w.done: - return fmt.Errorf("stream closed, unable to send event") - } - - select { - case err := <-resultCh: - return err - case <-ctx.Done(): - return ctx.Err() - case <-w.done: - return fmt.Errorf("stream closed, unable to send event") - } -} - -func (w *StreamWriter) writeStream() { - defer w.Close() - - for { - select { - case wrapper := <-w.stream: - err := w.eventWriter.WriteEvent(wrapper.Event) - wrapper.ReportResult(w.done, err) - if err != nil { - w.err.SetError(err) - return - } - - case <-w.done: - if err := w.streamCloser.Close(); err != nil { - w.err.SetError(err) - } - return - } - } -} - -type eventWriteAsyncReport struct { - Event Marshaler - Result chan<- error -} - -func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { - select { - case e.Result <- err: - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go deleted file mode 100644 index 10a3823d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go +++ /dev/null @@ -1,109 +0,0 @@ -package eventstreamapi - -import ( - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/eventstream" -) - -// Marshaler provides a marshaling interface for event types to event stream -// messages. -type Marshaler interface { - MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) -} - -// Encoder is an stream encoder that will encode an event stream message for -// the transport. -type Encoder interface { - Encode(eventstream.Message) error -} - -// EventWriter provides a wrapper around the underlying event stream encoder -// for an io.WriteCloser. -type EventWriter struct { - encoder Encoder - payloadMarshaler protocol.PayloadMarshaler - eventTypeFor func(Marshaler) (string, error) -} - -// NewEventWriter returns a new event stream writer, that will write to the -// writer provided. Use the WriteEvent method to write an event to the stream. -func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), -) *EventWriter { - return &EventWriter{ - encoder: encoder, - payloadMarshaler: pm, - eventTypeFor: eventTypeFor, - } -} - -// WriteEvent writes an event to the stream. Returns an error if the event -// fails to marshal into a message, or writing to the underlying writer fails. 
-func (w *EventWriter) WriteEvent(event Marshaler) error { - msg, err := w.marshal(event) - if err != nil { - return err - } - - return w.encoder.Encode(msg) -} - -func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { - eventType, err := w.eventTypeFor(event) - if err != nil { - return eventstream.Message{}, err - } - - msg, err := event.MarshalEvent(w.payloadMarshaler) - if err != nil { - return eventstream.Message{}, err - } - - msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) - return msg, nil -} - -//type EventEncoder struct { -// encoder Encoder -// ppayloadMarshaler protocol.PayloadMarshaler -// eventTypeFor func(Marshaler) (string, error) -//} -// -//func (e EventEncoder) Encode(event Marshaler) error { -// msg, err := e.marshal(event) -// if err != nil { -// return err -// } -// -// return w.encoder.Encode(msg) -//} -// -//func (e EventEncoder) marshal(event Marshaler) (eventstream.Message, error) { -// eventType, err := w.eventTypeFor(event) -// if err != nil { -// return eventstream.Message{}, err -// } -// -// msg, err := event.MarshalEvent(w.payloadMarshaler) -// if err != nil { -// return eventstream.Message{}, err -// } -// -// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) -// return msg, nil -//} -// -//func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { -// eventType, err := w.eventTypeFor(event) -// if err != nil { -// return eventstream.Message{}, err -// } -// -// msg, err := event.MarshalEvent(w.payloadMarshaler) -// if err != nil { -// return eventstream.Message{}, err -// } -// -// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) -// return msg, nil -//} -// diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go deleted file mode 100644 index 3b44dde2..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go +++ /dev/null @@ -1,166 +0,0 @@ -package eventstream - -import ( - "encoding/binary" - "fmt" - "io" -) - -// Headers are a collection of EventStream header values. -type Headers []Header - -// Header is a single EventStream Key Value header pair. -type Header struct { - Name string - Value Value -} - -// Set associates the name with a value. If the header name already exists in -// the Headers the value will be replaced with the new one. -func (hs *Headers) Set(name string, value Value) { - var i int - for ; i < len(*hs); i++ { - if (*hs)[i].Name == name { - (*hs)[i].Value = value - return - } - } - - *hs = append(*hs, Header{ - Name: name, Value: value, - }) -} - -// Get returns the Value associated with the header. Nil is returned if the -// value does not exist. -func (hs Headers) Get(name string) Value { - for i := 0; i < len(hs); i++ { - if h := hs[i]; h.Name == name { - return h.Value - } - } - return nil -} - -// Del deletes the value in the Headers if it exists. 
-func (hs *Headers) Del(name string) { - for i := 0; i < len(*hs); i++ { - if (*hs)[i].Name == name { - copy((*hs)[i:], (*hs)[i+1:]) - (*hs) = (*hs)[:len(*hs)-1] - } - } -} - -func decodeHeaders(r io.Reader) (Headers, error) { - hs := Headers{} - - for { - name, err := decodeHeaderName(r) - if err != nil { - if err == io.EOF { - // EOF while getting header name means no more headers - break - } - return nil, err - } - - value, err := decodeHeaderValue(r) - if err != nil { - return nil, err - } - - hs.Set(name, value) - } - - return hs, nil -} - -func decodeHeaderName(r io.Reader) (string, error) { - var n headerName - - var err error - n.Len, err = decodeUint8(r) - if err != nil { - return "", err - } - - name := n.Name[:n.Len] - if _, err := io.ReadFull(r, name); err != nil { - return "", err - } - - return string(name), nil -} - -func decodeHeaderValue(r io.Reader) (Value, error) { - var raw rawValue - - typ, err := decodeUint8(r) - if err != nil { - return nil, err - } - raw.Type = valueType(typ) - - var v Value - - switch raw.Type { - case trueValueType: - v = BoolValue(true) - case falseValueType: - v = BoolValue(false) - case int8ValueType: - var tv Int8Value - err = tv.decode(r) - v = tv - case int16ValueType: - var tv Int16Value - err = tv.decode(r) - v = tv - case int32ValueType: - var tv Int32Value - err = tv.decode(r) - v = tv - case int64ValueType: - var tv Int64Value - err = tv.decode(r) - v = tv - case bytesValueType: - var tv BytesValue - err = tv.decode(r) - v = tv - case stringValueType: - var tv StringValue - err = tv.decode(r) - v = tv - case timestampValueType: - var tv TimestampValue - err = tv.decode(r) - v = tv - case uuidValueType: - var tv UUIDValue - err = tv.decode(r) - v = tv - default: - panic(fmt.Sprintf("unknown value type %d", raw.Type)) - } - - // Error could be EOF, let caller deal with it - return v, err -} - -const maxHeaderNameLen = 255 - -type headerName struct { - Len uint8 - Name [maxHeaderNameLen]byte -} - -func (v headerName) encode(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { - return err - } - - _, err := w.Write(v.Name[:v.Len]) - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go deleted file mode 100644 index 9f509d8f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go +++ /dev/null @@ -1,506 +0,0 @@ -package eventstream - -import ( - "encoding/base64" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" -) - -const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 - -// valueType is the EventStream header value type. 
-type valueType uint8 - -// Header value types -const ( - trueValueType valueType = iota - falseValueType - int8ValueType // Byte - int16ValueType // Short - int32ValueType // Integer - int64ValueType // Long - bytesValueType - stringValueType - timestampValueType - uuidValueType -) - -func (t valueType) String() string { - switch t { - case trueValueType: - return "bool" - case falseValueType: - return "bool" - case int8ValueType: - return "int8" - case int16ValueType: - return "int16" - case int32ValueType: - return "int32" - case int64ValueType: - return "int64" - case bytesValueType: - return "byte_array" - case stringValueType: - return "string" - case timestampValueType: - return "timestamp" - case uuidValueType: - return "uuid" - default: - return fmt.Sprintf("unknown value type %d", uint8(t)) - } -} - -type rawValue struct { - Type valueType - Len uint16 // Only set for variable length slices - Value []byte // byte representation of value, BigEndian encoding. -} - -func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { - return binaryWriteFields(w, binary.BigEndian, - r.Type, - v, - ) -} - -func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { - binary.Write(w, binary.BigEndian, r.Type) - - _, err := w.Write(v) - return err -} - -func (r rawValue) encodeBytes(w io.Writer, v []byte) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - _, err = w.Write(v) - return err -} - -func (r rawValue) encodeString(w io.Writer, v string) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - type stringWriter interface { - WriteString(string) (int, error) - } - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - if sw, ok := w.(stringWriter); ok { - _, err = sw.WriteString(v) - } else { - _, err = w.Write([]byte(v)) - } - - return err -} - -func decodeFixedBytesValue(r io.Reader, buf []byte) error { - _, err := io.ReadFull(r, buf) - return err -} - -func decodeBytesValue(r io.Reader) ([]byte, error) { - var raw rawValue - var err error - raw.Len, err = decodeUint16(r) - if err != nil { - return nil, err - } - - buf := make([]byte, raw.Len) - _, err = io.ReadFull(r, buf) - if err != nil { - return nil, err - } - - return buf, nil -} - -func decodeStringValue(r io.Reader) (string, error) { - v, err := decodeBytesValue(r) - return string(v), err -} - -// Value represents the abstract header value. -type Value interface { - Get() interface{} - String() string - valueType() valueType - encode(io.Writer) error -} - -// An BoolValue provides eventstream encoding, and representation -// of a Go bool value. -type BoolValue bool - -// Get returns the underlying type -func (v BoolValue) Get() interface{} { - return bool(v) -} - -// valueType returns the EventStream header value type value. -func (v BoolValue) valueType() valueType { - if v { - return trueValueType - } - return falseValueType -} - -func (v BoolValue) String() string { - return strconv.FormatBool(bool(v)) -} - -// encode encodes the BoolValue into an eventstream binary value -// representation. 
-func (v BoolValue) encode(w io.Writer) error { - return binary.Write(w, binary.BigEndian, v.valueType()) -} - -// An Int8Value provides eventstream encoding, and representation of a Go -// int8 value. -type Int8Value int8 - -// Get returns the underlying value. -func (v Int8Value) Get() interface{} { - return int8(v) -} - -// valueType returns the EventStream header value type value. -func (Int8Value) valueType() valueType { - return int8ValueType -} - -func (v Int8Value) String() string { - return fmt.Sprintf("0x%02x", int8(v)) -} - -// encode encodes the Int8Value into an eventstream binary value -// representation. -func (v Int8Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeScalar(w, v) -} - -func (v *Int8Value) decode(r io.Reader) error { - n, err := decodeUint8(r) - if err != nil { - return err - } - - *v = Int8Value(n) - return nil -} - -// An Int16Value provides eventstream encoding, and representation of a Go -// int16 value. -type Int16Value int16 - -// Get returns the underlying value. -func (v Int16Value) Get() interface{} { - return int16(v) -} - -// valueType returns the EventStream header value type value. -func (Int16Value) valueType() valueType { - return int16ValueType -} - -func (v Int16Value) String() string { - return fmt.Sprintf("0x%04x", int16(v)) -} - -// encode encodes the Int16Value into an eventstream binary value -// representation. -func (v Int16Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int16Value) decode(r io.Reader) error { - n, err := decodeUint16(r) - if err != nil { - return err - } - - *v = Int16Value(n) - return nil -} - -// An Int32Value provides eventstream encoding, and representation of a Go -// int32 value. -type Int32Value int32 - -// Get returns the underlying value. -func (v Int32Value) Get() interface{} { - return int32(v) -} - -// valueType returns the EventStream header value type value. -func (Int32Value) valueType() valueType { - return int32ValueType -} - -func (v Int32Value) String() string { - return fmt.Sprintf("0x%08x", int32(v)) -} - -// encode encodes the Int32Value into an eventstream binary value -// representation. -func (v Int32Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int32Value) decode(r io.Reader) error { - n, err := decodeUint32(r) - if err != nil { - return err - } - - *v = Int32Value(n) - return nil -} - -// An Int64Value provides eventstream encoding, and representation of a Go -// int64 value. -type Int64Value int64 - -// Get returns the underlying value. -func (v Int64Value) Get() interface{} { - return int64(v) -} - -// valueType returns the EventStream header value type value. -func (Int64Value) valueType() valueType { - return int64ValueType -} - -func (v Int64Value) String() string { - return fmt.Sprintf("0x%016x", int64(v)) -} - -// encode encodes the Int64Value into an eventstream binary value -// representation. -func (v Int64Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int64Value) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = Int64Value(n) - return nil -} - -// An BytesValue provides eventstream encoding, and representation of a Go -// byte slice. -type BytesValue []byte - -// Get returns the underlying value. 
-func (v BytesValue) Get() interface{} { - return []byte(v) -} - -// valueType returns the EventStream header value type value. -func (BytesValue) valueType() valueType { - return bytesValueType -} - -func (v BytesValue) String() string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -// encode encodes the BytesValue into an eventstream binary value -// representation. -func (v BytesValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeBytes(w, []byte(v)) -} - -func (v *BytesValue) decode(r io.Reader) error { - buf, err := decodeBytesValue(r) - if err != nil { - return err - } - - *v = BytesValue(buf) - return nil -} - -// An StringValue provides eventstream encoding, and representation of a Go -// string. -type StringValue string - -// Get returns the underlying value. -func (v StringValue) Get() interface{} { - return string(v) -} - -// valueType returns the EventStream header value type value. -func (StringValue) valueType() valueType { - return stringValueType -} - -func (v StringValue) String() string { - return string(v) -} - -// encode encodes the StringValue into an eventstream binary value -// representation. -func (v StringValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeString(w, string(v)) -} - -func (v *StringValue) decode(r io.Reader) error { - s, err := decodeStringValue(r) - if err != nil { - return err - } - - *v = StringValue(s) - return nil -} - -// An TimestampValue provides eventstream encoding, and representation of a Go -// timestamp. -type TimestampValue time.Time - -// Get returns the underlying value. -func (v TimestampValue) Get() interface{} { - return time.Time(v) -} - -// valueType returns the EventStream header value type value. -func (TimestampValue) valueType() valueType { - return timestampValueType -} - -func (v TimestampValue) epochMilli() int64 { - nano := time.Time(v).UnixNano() - msec := nano / int64(time.Millisecond) - return msec -} - -func (v TimestampValue) String() string { - msec := v.epochMilli() - return strconv.FormatInt(msec, 10) -} - -// encode encodes the TimestampValue into an eventstream binary value -// representation. -func (v TimestampValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - msec := v.epochMilli() - return raw.encodeScalar(w, msec) -} - -func (v *TimestampValue) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = TimestampValue(timeFromEpochMilli(int64(n))) - return nil -} - -// MarshalJSON implements the json.Marshaler interface -func (v TimestampValue) MarshalJSON() ([]byte, error) { - return []byte(v.String()), nil -} - -func timeFromEpochMilli(t int64) time.Time { - secs := t / 1e3 - msec := t % 1e3 - return time.Unix(secs, msec*int64(time.Millisecond)).UTC() -} - -// An UUIDValue provides eventstream encoding, and representation of a UUID -// value. -type UUIDValue [16]byte - -// Get returns the underlying value. -func (v UUIDValue) Get() interface{} { - return v[:] -} - -// valueType returns the EventStream header value type value. -func (UUIDValue) valueType() valueType { - return uuidValueType -} - -func (v UUIDValue) String() string { - return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) -} - -// encode encodes the UUIDValue into an eventstream binary value -// representation. 
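For reference, the timestamp header type removed above encodes milliseconds since the Unix epoch, so sub-millisecond precision is dropped on a round trip. A small illustration using the exported TimestampValue:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	t := time.Date(2020, 1, 2, 3, 4, 5, 123456789, time.UTC)
	v := eventstream.TimestampValue(t)
	// String() prints epoch milliseconds; the 123456789ns fraction becomes 123ms.
	fmt.Println(v) // 1577934245123
}
```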
-func (v UUIDValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeFixedSlice(w, v[:]) -} - -func (v *UUIDValue) decode(r io.Reader) error { - tv := (*v)[:] - return decodeFixedBytesValue(r, tv) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go deleted file mode 100644 index 25c9783c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go +++ /dev/null @@ -1,103 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "hash/crc32" -) - -const preludeLen = 8 -const preludeCRCLen = 4 -const msgCRCLen = 4 -const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen -const maxPayloadLen = 1024 * 1024 * 16 // 16MB -const maxHeadersLen = 1024 * 128 // 128KB -const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen - -var crc32IEEETable = crc32.MakeTable(crc32.IEEE) - -// A Message provides the eventstream message representation. -type Message struct { - Headers Headers - Payload []byte -} - -func (m *Message) rawMessage() (rawMessage, error) { - var raw rawMessage - - if len(m.Headers) > 0 { - var headers bytes.Buffer - if err := EncodeHeaders(&headers, m.Headers); err != nil { - return rawMessage{}, err - } - raw.Headers = headers.Bytes() - raw.HeadersLen = uint32(len(raw.Headers)) - } - - raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen - - hash := crc32.New(crc32IEEETable) - binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) - raw.PreludeCRC = hash.Sum32() - - binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) - - if raw.HeadersLen > 0 { - hash.Write(raw.Headers) - } - - // Read payload bytes and update hash for it as well. - if len(m.Payload) > 0 { - raw.Payload = m.Payload - hash.Write(raw.Payload) - } - - raw.CRC = hash.Sum32() - - return raw, nil -} - -type messagePrelude struct { - Length uint32 - HeadersLen uint32 - PreludeCRC uint32 -} - -func (p messagePrelude) PayloadLen() uint32 { - return p.Length - p.HeadersLen - minMsgLen -} - -func (p messagePrelude) ValidateLens() error { - if p.Length == 0 || p.Length > maxMsgLen { - return LengthError{ - Part: "message prelude", - Want: maxMsgLen, - Have: int(p.Length), - } - } - if p.HeadersLen > maxHeadersLen { - return LengthError{ - Part: "message headers", - Want: maxHeadersLen, - Have: int(p.HeadersLen), - } - } - if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { - return LengthError{ - Part: "message payload", - Want: maxPayloadLen, - Have: int(payloadLen), - } - } - - return nil -} - -type rawMessage struct { - messagePrelude - - Headers []byte - Payload []byte - - CRC uint32 -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go deleted file mode 100644 index d7d42db0..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ /dev/null @@ -1,68 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. 
-var ValidateEndpointHostHandler = request.NamedHandler{ - Name: "awssdk.protocol.ValidateEndpointHostHandler", - Fn: func(r *request.Request) { - err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) - if err != nil { - r.Error = err - } - }, -} - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(opName, host string) error { - paramErrs := request.ErrInvalidParams{Context: opName} - labels := strings.Split(host, ".") - - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - paramErrs.Add(request.NewErrParamFormat( - "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) - } - } - - if len(host) > 255 { - paramErrs.Add(request.NewErrParamMaxLen( - "endpoint host", 255, host, - )) - } - - if paramErrs.Len() > 0 { - return paramErrs - } - return nil -} - -// ValidHostLabel returns if the label is a valid RFC 3986 host label. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go deleted file mode 100644 index 915b0fca..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go +++ /dev/null @@ -1,54 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// HostPrefixHandlerName is the handler name for the host prefix request -// handler. -const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" - -// NewHostPrefixHandler constructs a build handler -func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { - builder := HostPrefixBuilder{ - Prefix: prefix, - LabelsFn: labelsFn, - } - - return request.NamedHandler{ - Name: HostPrefixHandlerName, - Fn: builder.Build, - } -} - -// HostPrefixBuilder provides the request handler to expand and prepend -// the host prefix into the operation's request endpoint host. -type HostPrefixBuilder struct { - Prefix string - LabelsFn func() map[string]string -} - -// Build updates the passed in Request with the HostPrefix template expanded. -func (h HostPrefixBuilder) Build(r *request.Request) { - if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { - return - } - - var labels map[string]string - if h.LabelsFn != nil { - labels = h.LabelsFn() - } - - prefix := h.Prefix - for name, value := range labels { - prefix = strings.Replace(prefix, "{"+name+"}", value, -1) - } - - r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host - if len(r.HTTPRequest.Host) > 0 { - r.HTTPRequest.Host = prefix + r.HTTPRequest.Host - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831dff..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. 
This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. -func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 864fb670..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,296 +0,0 @@ -// Package jsonutil provides JSON serialization of AWS requests and responses. -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. 
-func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - origVal := value - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(origVal, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - buf.WriteByte('{') - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - - // This allocates the most memory. - // Additionally, we cannot skip nil fields due to - // idempotency auto filling. - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - buf.WriteString("}") - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - sv := sortedValues(value.MapKeys()) - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - 
buf.WriteString("}") - - return nil -} - -func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - // prevents allocation on the heap. - scratch := [64]byte{} - switch value := reflect.Indirect(v); value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int64: - buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) - case reflect.Float64: - f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} - } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) - default: - switch converted := value.Interface().(type) { - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.UnixTimeFormatName - } - - ts := protocol.FormatTime(format, converted) - if format != protocol.UnixTimeFormatName { - ts = `"` + ts + `"` - } - - buf.WriteString(ts) - case []byte: - if !value.IsNil() { - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. - enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - case aws.JSONValue: - str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) - if err != nil { - return fmt.Errorf("unable to encode JSONValue, %v", err) - } - buf.WriteString(str) - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -var hex = "0123456789abcdef" - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for i := 0; i < len(s); i++ { - if s[i] == '"' { - buf.WriteString(`\"`) - } else if s[i] == '\\' { - buf.WriteString(`\\`) - } else if s[i] == '\b' { - buf.WriteString(`\b`) - } else if s[i] == '\f' { - buf.WriteString(`\f`) - } else if s[i] == '\r' { - buf.WriteString(`\r`) - } else if s[i] == '\t' { - buf.WriteString(`\t`) - } else if s[i] == '\n' { - buf.WriteString(`\n`) - } else if s[i] < 32 { - buf.WriteString("\\u00") - buf.WriteByte(hex[s[i]>>4]) - buf.WriteByte(hex[s[i]&0xF]) - } else { - buf.WriteByte(s[i]) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index 5e949969..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,282 +0,0 @@ -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in -// type. The value to unmarshal the json document into must be a pointer to the -// type. 
-func UnmarshalJSONError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := json.NewDecoder(body).Decode(v) - if err != nil { - msg := "failed decoding error message" - if err == io.EOF { - msg = "error message missing" - err = nil - } - return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) - } - - return nil -} - -// UnmarshalJSON reads a stream and unmarshals the results in object v. -func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - err := json.NewDecoder(stream).Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") -} - -// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the -// object v. Ignores casing for structure members. -func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { - var out interface{} - - err := json.NewDecoder(stream).Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{ - caseInsensitive: true, - }.unmarshalAny(reflect.ValueOf(v), out, "") -} - -type unmarshaler struct { - caseInsensitive bool -} - -func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return u.unmarshalStruct(value, data, tag) - case "list": - return u.unmarshalList(value, data, tag) - case "map": - return u.unmarshalMap(value, data, tag) - default: - return u.unmarshalScalar(value, data, tag) - } -} - -func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if u.caseInsensitive { - if _, ok := mapData[name]; !ok { - // Fallback to uncased name search if the exact name didn't match. 
- for kn, v := range mapData { - if strings.EqualFold(kn, name) { - mapData[name] = v - } - } - } - } - - member := value.FieldByIndex(field.Index) - err := u.unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := u.unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - u.unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - // No need to use escaping as the value is a non-quoted string. - v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) - if err != nil { - return err - } - value.Set(reflect.ValueOf(v)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case float64: - switch value.Interface().(type) { - case *int64: - di := int64(d) - value.Set(reflect.ValueOf(&di)) - case *float64: - value.Set(reflect.ValueOf(&d)) - case *time.Time: - // Time unmarshaled from a float64 can only be epoch seconds - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go deleted file mode 100644 index 776d1101..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go +++ /dev/null @@ -1,76 +0,0 @@ -package protocol - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strconv" - - "github.com/aws/aws-sdk-go/aws" -) - -// EscapeMode is the mode that should be use for escaping a value -type EscapeMode uint - -// The modes for escaping a value before it is marshaled, and unmarshaled. 
-const ( - NoEscape EscapeMode = iota - Base64Escape - QuotedEscape -) - -// EncodeJSONValue marshals the value into a JSON string, and optionally base64 -// encodes the string before returning it. -// -// Will panic if the escape mode is unknown. -func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { - b, err := json.Marshal(v) - if err != nil { - return "", err - } - - switch escape { - case NoEscape: - return string(b), nil - case Base64Escape: - return base64.StdEncoding.EncodeToString(b), nil - case QuotedEscape: - return strconv.Quote(string(b)), nil - } - - panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) -} - -// DecodeJSONValue will attempt to decode the string input as a JSONValue. -// Optionally decoding base64 the value first before JSON unmarshaling. -// -// Will panic if the escape mode is unknown. -func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { - var b []byte - var err error - - switch escape { - case NoEscape: - b = []byte(v) - case Base64Escape: - b, err = base64.StdEncoding.DecodeString(v) - case QuotedEscape: - var u string - u, err = strconv.Unquote(v) - b = []byte(u) - default: - panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) - } - - if err != nil { - return nil, err - } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) - if err != nil { - return nil, err - } - - return m, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go deleted file mode 100644 index 0ea0647a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ /dev/null @@ -1,81 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// PayloadUnmarshaler provides the interface for unmarshaling a payload's -// reader into a SDK shape. -type PayloadUnmarshaler interface { - UnmarshalPayload(io.Reader, interface{}) error -} - -// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a -// HandlerList. This provides the support for unmarshaling a payload reader to -// a shape without needing a SDK request first. -type HandlerPayloadUnmarshal struct { - Unmarshalers request.HandlerList -} - -// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using -// the Unmarshalers HandlerList provided. Returns an error if unable -// unmarshaling fails. -func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { - req := &request.Request{ - HTTPRequest: &http.Request{}, - HTTPResponse: &http.Response{ - StatusCode: 200, - Header: http.Header{}, - Body: ioutil.NopCloser(r), - }, - Data: v, - } - - h.Unmarshalers.Run(req) - - return req.Error -} - -// PayloadMarshaler provides the interface for marshaling a SDK shape into and -// io.Writer. -type PayloadMarshaler interface { - MarshalPayload(io.Writer, interface{}) error -} - -// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. -// This provides support for marshaling a SDK shape into an io.Writer without -// needing a SDK request first. -type HandlerPayloadMarshal struct { - Marshalers request.HandlerList -} - -// MarshalPayload marshals the SDK shape into the io.Writer using the -// Marshalers HandlerList provided. Returns an error if unable if marshal -// fails. 
-func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { - req := request.New( - aws.Config{}, - metadata.ClientInfo{}, - request.Handlers{}, - nil, - &request.Operation{HTTPMethod: "PUT"}, - v, - nil, - ) - - h.Marshalers.Run(req) - - if req.Error != nil { - return req.Error - } - - io.Copy(w, req.GetBody()) - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go deleted file mode 100644 index 9d521dcb..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go +++ /dev/null @@ -1,49 +0,0 @@ -package protocol - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RequireHTTPMinProtocol request handler is used to enforce that -// the target endpoint supports the given major and minor HTTP protocol version. -type RequireHTTPMinProtocol struct { - Major, Minor int -} - -// Handler will mark the request.Request with an error if the -// target endpoint did not connect with the required HTTP protocol -// major and minor version. -func (p RequireHTTPMinProtocol) Handler(r *request.Request) { - if r.Error != nil || r.HTTPResponse == nil { - return - } - - if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } - - if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } -} - -// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint -// did not match the required HTTP major and minor protocol version. -const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" - -func newMinHTTPProtoError(major, minor int, r *request.Request) error { - return awserr.NewRequestFailure( - awserr.New("MinimumHTTPProtocolError", - fmt.Sprintf( - "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - major, minor, r.HTTPResponse.Proto, - ), - nil, - ), - r.HTTPResponse.StatusCode, r.RequestID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go deleted file mode 100644 index 0cb99eb5..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialization of AWS query requests, and responses. -package query - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. 
-func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.ClientInfo.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) - return - } - - if !r.IsPresigned() { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 75866d01..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,246 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. -func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "") -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - return q.parseStruct(v, value, prefix) - case "list": - return q.parseList(v, value, prefix, tag) - case "map": - return q.parseMap(v, value, prefix, tag) - default: - return q.parseScalar(v, value, prefix, tag) - } -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." 
+ name - } - - if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - if _, ok := value.Interface().([]byte); ok { - return q.parseScalar(v, value, prefix, tag) - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - if listName := tag.Get("locationNameList"); listName == "" { - prefix += ".member" - } else { - prefix += "." + listName - } - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. - mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, ""); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname - } - - if err := q.parseValue(v, mapValue, valueName, ""); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - v.Set(name, protocol.FormatTime(format, value)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go deleted file mode 100644 index f69c1efc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,39 +0,0 @@ -package query - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. 
-func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go deleted file mode 100644 index 831b0110..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,69 +0,0 @@ -package query - -import ( - "encoding/xml" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - -type xmlErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -type xmlResponseError struct { - xmlErrorResponse -} - -func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - const svcUnavailableTagName = "ServiceUnavailableException" - const errorResponseTagName = "ErrorResponse" - - switch start.Name.Local { - case svcUnavailableTagName: - e.Code = svcUnavailableTagName - e.Message = "service is unavailable" - return d.Skip() - - case errorResponseTagName: - return d.DecodeElement(&e.xmlErrorResponse, &start) - - default: - return fmt.Errorf("unknown error response tag, %v", start) - } -} - -// UnmarshalError unmarshals an error response for an AWS Query service. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var respErr xmlResponseError - err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - reqID := respErr.RequestID - if len(reqID) == 0 { - reqID = r.RequestID - } - - r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index 1301b149..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,310 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -var byteSliceType = reflect.TypeOf([]byte{}) - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' 
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, false) - buildBody(r, v) - } -} - -// BuildAsGET builds the REST component of a service request with the ability to hoist -// data from the body. -func BuildAsGET(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, true) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { - query := r.HTTPRequest.URL.Query() - - // Setup the raw path to match the base path pattern. This is needed - // so that when the path is mutated a custom escaped version can be - // stored in RawPath that will be used by the Go client. - r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if kind := m.Kind(); kind == reflect.Ptr { - m = m.Elem() - } else if kind == reflect.Interface { - if !m.Elem().IsValid() { - continue - } - } - if !m.IsValid() { - continue - } - if field.Tag.Get("ignore") != "" { - continue - } - - // Support the ability to customize values to be marshaled as a - // blob even though they were modeled as a string. Required for S3 - // API operations like SSECustomerKey is modeled as stirng but - // required to be base64 encoded in request. 
- if field.Tag.Get("marshal-as") == "blob" { - m = m.Convert(byteSliceType) - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) - case "querystring": - err = buildQueryString(query, m, name, field.Tag) - default: - if buildGETQuery { - err = buildQueryString(query, m, name, field.Tag) - } - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { - cleanPath(r.HTTPRequest.URL) - } -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - name = strings.TrimSpace(name) - str = strings.TrimSpace(str) - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { - prefix := tag.Get("locationName") - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key), tag) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - - } - keyStr := strings.TrimSpace(key.String()) - str = strings.TrimSpace(str) - - header.Add(prefix+keyStr, str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { - value, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) - u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) - - u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) - u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else 
if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func cleanPath(u *url.URL) { - hasSlash := strings.HasSuffix(u.Path, "/") - - // clean up path, removing duplicate `/` - u.Path = path.Clean(u.Path) - u.RawPath = path.Clean(u.RawPath) - - if hasSlash && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - u.RawPath += "/" - } -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - if tag.Get("location") == "querystring" { - format = protocol.ISO8601TimeFormatName - } - } - str = protocol.FormatTime(format, value) - case aws.JSONValue: - if len(value) == 0 { - return "", errValueNotSet - } - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - str, err = protocol.EncodeJSONValue(value, escaping) - if err != nil { - return "", fmt.Errorf("unable to encode JSONValue, %v", err) - } - default: - err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. -func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". 
-func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 92f8b4d9..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,257 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - awsStrings "github.com/aws/aws-sdk-go/internal/strings" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. -func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - if err := unmarshalBody(r, v); err != nil { - r.Error = err - } - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { - r.Error = err - } - } -} - -// UnmarshalResponse attempts to unmarshal the REST response headers to -// the data type passed in. The type must be a pointer. An error is returned -// with any error unmarshaling the response into the target datatype. 
-func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { - v := reflect.Indirect(reflect.ValueOf(data)) - return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) -} - -func unmarshalBody(r *request.Request, v reflect.Value) error { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - payload.Set(reflect.ValueOf(b)) - - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - str := string(b) - payload.Set(reflect.ValueOf(&str)) - - default: - switch payload.Type().String() { - case "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - - case "io.ReadSeeker": - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to read response body", err) - } - payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) - - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() - return awserr.New(request.ErrCodeSerialization, - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } - - return nil -} - -func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, resp.StatusCode) - - case "header": - err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) - if err != nil { - awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - } - } - } - - return nil -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { - if len(headers) == 0 { - return nil - } - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - if awsStrings.HasPrefixFold(k, prefix) { - if normalize == true { - k = strings.ToLower(k) - } else { - k = http.CanonicalHeaderKey(k) - } - out[k[len(prefix):]] = &v[0] - } - } - if len(out) != 0 { - r.Set(reflect.ValueOf(out)) - } - - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { - switch tag.Get("type") { - case "jsonvalue": - if len(header) == 0 { - return nil - } - case "blob": - if len(header) == 0 { - return nil - } - default: - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - } - t, err := protocol.ParseTime(format, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - m, err := protocol.DecodeJSONValue(header, escaping) - if err != nil { - return err - } - v.Set(reflect.ValueOf(m)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go deleted file mode 100644 index 07a6187e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ /dev/null @@ -1,79 +0,0 @@ -// Package restxml provides RESTful XML serialization of AWS -// requests and responses. -package restxml - -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go - -import ( - "bytes" - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/protocol/rest" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// BuildHandler is a named request handler for building restxml protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} - -// Build builds a request payload for the REST XML protocol. 
-func Build(r *request.Request) { - rest.Build(r) - - if t := rest.PayloadType(r.Params); t == "structure" || t == "" { - var buf bytes.Buffer - err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to encode rest XML request", err), - 0, - r.RequestID, - ) - return - } - r.SetBufferBody(buf.Bytes()) - } -} - -// Unmarshal unmarshals a payload response for the REST XML protocol. -func Unmarshal(r *request.Request) { - if t := rest.PayloadType(r.Data); t == "structure" || t == "" { - defer r.HTTPResponse.Body.Close() - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, "") - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode REST XML response", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - } else { - rest.Unmarshal(r) - } -} - -// UnmarshalMeta unmarshals response headers for the REST XML protocol. -func UnmarshalMeta(r *request.Request) { - rest.UnmarshalMeta(r) -} - -// UnmarshalError unmarshals a response error for the REST XML protocol. -func UnmarshalError(r *request.Request) { - query.UnmarshalError(r) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go deleted file mode 100644 index 05d4ff51..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ /dev/null @@ -1,84 +0,0 @@ -package protocol - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/internal/sdkmath" -) - -// Names of time formats supported by the SDK -const ( - RFC822TimeFormatName = "rfc822" - ISO8601TimeFormatName = "iso8601" - UnixTimeFormatName = "unixTimestamp" -) - -// Time formats supported by the SDK -// Output time is intended to not contain decimals -const ( - // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT - RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" - - // This format is used for output time without seconds precision - RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - - // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - - // This format is used for output time without seconds precision - ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" -) - -// IsKnownTimestampFormat returns if the timestamp format name -// is know to the SDK's protocols. -func IsKnownTimestampFormat(name string) bool { - switch name { - case RFC822TimeFormatName: - fallthrough - case ISO8601TimeFormatName: - fallthrough - case UnixTimeFormatName: - return true - default: - return false - } -} - -// FormatTime returns a string value of the time. -func FormatTime(name string, t time.Time) string { - t = t.UTC() - - switch name { - case RFC822TimeFormatName: - return t.Format(RFC822OutputTimeFormat) - case ISO8601TimeFormatName: - return t.Format(ISO8601OutputTimeFormat) - case UnixTimeFormatName: - return strconv.FormatInt(t.Unix(), 10) - default: - panic("unknown timestamp format name, " + name) - } -} - -// ParseTime attempts to parse the time given the format. Returns -// the time if it was able to be parsed, and fails otherwise. 
-func ParseTime(formatName, value string) (time.Time, error) { - switch formatName { - case RFC822TimeFormatName: - return time.Parse(RFC822TimeFormat, value) - case ISO8601TimeFormatName: - return time.Parse(ISO8601TimeFormat, value) - case UnixTimeFormatName: - v, err := strconv.ParseFloat(value, 64) - _, dec := math.Modf(v) - dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 - if err != nil { - return time.Time{}, err - } - return time.Unix(int64(v), int64(dec*(1e9))), nil - default: - panic("unknown timestamp format name, " + formatName) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index f614ef89..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,27 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. -func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} - -// ResponseMetadata provides the SDK response metadata attributes. -type ResponseMetadata struct { - StatusCode int - RequestID string -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go deleted file mode 100644 index cc857f13..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go +++ /dev/null @@ -1,65 +0,0 @@ -package protocol - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalErrorHandler provides unmarshaling errors API response errors for -// both typed and untyped errors. -type UnmarshalErrorHandler struct { - unmarshaler ErrorUnmarshaler -} - -// ErrorUnmarshaler is an abstract interface for concrete implementations to -// unmarshal protocol specific response errors. -type ErrorUnmarshaler interface { - UnmarshalError(*http.Response, ResponseMetadata) (error, error) -} - -// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler -// initialized for the set of exception names to the error unmarshalers -func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { - return &UnmarshalErrorHandler{ - unmarshaler: unmarshaler, - } -} - -// UnmarshalErrorHandlerName is the name of the named handler. -const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" - -// NamedHandler returns a NamedHandler for the unmarshaler using the set of -// errors the unmarshaler was initialized for. -func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { - return request.NamedHandler{ - Name: UnmarshalErrorHandlerName, - Fn: u.UnmarshalError, - } -} - -// UnmarshalError will attempt to unmarshal the API response's error message -// into either a generic SDK error type, or a typed error corresponding to the -// errors exception name. 
-func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - respMeta := ResponseMetadata{ - StatusCode: r.HTTPResponse.StatusCode, - RequestID: r.RequestID, - } - - v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), - respMeta.StatusCode, - respMeta.RequestID, - ) - return - } - - r.Error = v -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go deleted file mode 100644 index cf981fe9..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ /dev/null @@ -1,306 +0,0 @@ -// Package xmlutil provides XML serialization of AWS requests and responses. -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// BuildXML will serialize params into an xml.Encoder. Error will be returned -// if the serialization of any of the params or nested values fails. -func BuildXML(params interface{}, e *xml.Encoder) error { - return buildXML(params, e, false) -} - -func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { - b := xmlBuilder{encoder: e, namespaces: map[string]string{}} - root := NewXMLElement(xml.Name{}) - if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { - return err - } - for _, c := range root.Children { - for _, v := range c { - return StructToXML(e, v, sorted) - } - } - return nil -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -// A xmlBuilder serializes values from Go code to XML -type xmlBuilder struct { - encoder *xml.Encoder - namespaces map[string]string -} - -// buildValue generic XMLNode builder for any type. Will build value for their specific type -// struct, list, map, scalar. -// -// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If -// type is not provided reflect will be used to determine the value's type. -func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - value = elemOf(value) - if !value.IsValid() { // no need to handle zero values - return nil - } else if tag.Get("location") != "" { // don't handle non-body location values - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := value.Type().FieldByName("_"); ok { - tag = tag + reflect.StructTag(" ") + field.Tag - } - return b.buildStruct(value, current, tag) - case "list": - return b.buildList(value, current, tag) - case "map": - return b.buildMap(value, current, tag) - default: - return b.buildScalar(value, current, tag) - } -} - -// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested -// types are converted to XMLNodes also. 
-func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - - // there is an xmlNamespace associated with this struct - if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { - ns := xml.Attr{ - Name: xml.Name{Local: "xmlns"}, - Value: uri, - } - if prefix != "" { - b.namespaces[prefix] = uri // register the namespace - ns.Name.Local = "xmlns:" + prefix - } - - child.Attr = append(child.Attr, ns) - } - - var payloadFields, nonPayloadFields int - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - member := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members - nonPayloadFields++ - continue - } - payloadFields++ - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(token) - } - - memberName := mTag.Get("locationName") - if memberName == "" { - memberName = field.Name - mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) - } - if err := b.buildValue(member, child, mTag); err != nil { - return err - } - } - - // Only case where the child shape is not added is if the shape only contains - // non-payload fields, e.g headers/query. - if !(payloadFields == 0 && nonPayloadFields > 0) { - current.AddChild(child) - } - - return nil -} - -// buildList adds the value's list items to the current XMLNode as children nodes. All -// nested values in the list are converted to XMLNodes also. -func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. 
-// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as a attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. -func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - str = protocol.FormatTime(format, converted) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go deleted file mode 100644 index c1a51185..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go +++ /dev/null @@ -1,32 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "strings" -) - -type xmlAttrSlice []xml.Attr - -func (x xmlAttrSlice) Len() int { - return len(x) -} - -func (x xmlAttrSlice) Less(i, j int) bool { - spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space - localI, localJ := x[i].Name.Local, x[j].Name.Local - valueI, valueJ := x[i].Value, x[j].Value - - spaceCmp := strings.Compare(spaceI, spaceJ) - localCmp := strings.Compare(localI, localJ) - valueCmp := 
strings.Compare(valueI, valueJ) - - if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { - return true - } - - return false -} - -func (x xmlAttrSlice) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 7108d380..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,291 +0,0 @@ -package xmlutil - -import ( - "bytes" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalXMLError unmarshals the XML error from the stream into the value -// type specified. The value must be a pointer. If the message fails to -// unmarshal, the message content will be included in the returned error as a -// awserr.UnmarshalError. -func UnmarshalXMLError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := xml.NewDecoder(body).Decode(v) - if err != nil && err != io.EOF { - return awserr.NewUnmarshalError(err, - "failed to unmarshal error message", errBuf.Bytes()) - } - - return nil -} - -// UnmarshalXML deserializes an xml.Decoder into the container v. V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. -func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, err := XMLToStruct(d, nil) - if err != nil { - return err - } - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err = parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := r.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := r.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. 
-func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - if r.Kind() == reflect.Ptr { - if r.IsNil() { // create the structure if it's nil - s := reflect.New(r.Type().Elem()) - r.Set(s) - r = s - } - - r = r.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return parseStruct(r.FieldByName(payload), node, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if c := field.Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - // try to find the field by name in elements - elems := node.Children[name] - - if elems == nil { // try to find the field in attributes - if val, ok := node.findElem(name); ok { - elems = []*XMLNode{{Text: val}} - } - } - - member := r.FieldByName(field.Name) - for _, elem := range elems { - err := parse(member, elem, field.Tag) - if err != nil { - return err - } - } - } - return nil -} - -// parseList deserializes a list of values from an XML node. Each list entry -// will also be deserialized. -func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - - if tag.Get("flattened") == "" { // look at all item entries - mname := "member" - if name := tag.Get("locationNameList"); name != "" { - mname = name - } - - if Children, ok := node.Children[mname]; ok { - if r.IsNil() { - r.Set(reflect.MakeSlice(t, len(Children), len(Children))) - } - - for i, c := range Children { - err := parse(r.Index(i), c, "") - if err != nil { - return err - } - } - } - } else { // flattened list means this is a single element - if r.IsNil() { - r.Set(reflect.MakeSlice(t, 0, 0)) - } - - childR := reflect.Zero(t.Elem()) - r.Set(reflect.Append(r, childR)) - err := parse(r.Index(r.Len()-1), node, "") - if err != nil { - return err - } - } - - return nil -} - -// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode -// will also be deserialized as map entries. -func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.IsNil() { - r.Set(reflect.MakeMap(r.Type())) - } - - if tag.Get("flattened") == "" { // look at all child entries - for _, entry := range node.Children["entry"] { - parseMapEntry(r, entry, tag) - } - } else { // this element is itself an entry - parseMapEntry(r, node, tag) - } - - return nil -} - -// parseMapEntry deserializes a map entry from a XML node. -func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - keys, ok := node.Children[kname] - values := node.Children[vname] - if ok { - for i, key := range keys { - keyR := reflect.ValueOf(key.Text) - value := values[i] - valueR := reflect.New(r.Type().Elem()).Elem() - - parse(valueR, value, "") - r.SetMapIndex(keyR, valueR) - } - } - return nil -} - -// parseScaller deserializes an XMLNode value into a concrete type based on the -// interface type of r. -// -// Error is returned if the deserialization fails due to invalid type conversion, -// or unsupported interface type. 
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - switch r.Interface().(type) { - case *string: - r.Set(reflect.ValueOf(&node.Text)) - return nil - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case *bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index 42f71648..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,159 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "fmt" - "io" - "sort" -) - -// A XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` - - namespaces map[string]string - parent *XMLNode -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - child.parent = n - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } else { - return out, err - } - } - - if tok == nil { - break - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - out.findNamespaces() - if e != nil { - return out, e - } - node.Name = typed.Name - node.findNamespaces() - tempOut := *out - // Save into a temp variable, simply because out gets squashed during - // loop iterations - node.parent = &tempOut - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - out = &XMLNode{} - } - } - return out, nil -} - -func (n *XMLNode) findNamespaces() { - ns := map[string]string{} - for _, a := range n.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - } - } - - n.namespaces = ns -} - -func (n *XMLNode) findElem(name string) (string, bool) { - for node := n; node != nil; node = node.parent { - for _, a := range node.Attr { - namespace := a.Name.Space - if v, ok := node.namespaces[namespace]; ok { - namespace = v - } - if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { - return a.Value, true - } - } - } - return "", false -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. -func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - // Sort Attributes - attrs := node.Attr - if sorted { - sortedAttrs := make([]xml.Attr, len(attrs)) - for _, k := range node.Attr { - sortedAttrs = append(sortedAttrs, k) - } - sort.Sort(xmlAttrSlice(sortedAttrs)) - attrs = sortedAttrs - } - - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) - - if node.Text != "" { - e.EncodeToken(xml.CharData([]byte(node.Text))) - } else if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(xml.EndElement{Name: node.Name}) - return e.Flush() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go deleted file mode 100644 index 52e87308..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ /dev/null @@ -1,30696 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package s3 - -import ( - "bytes" - "fmt" - "io" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/eventstream" - "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" - "github.com/aws/aws-sdk-go/private/protocol/rest" - "github.com/aws/aws-sdk-go/private/protocol/restxml" - "github.com/aws/aws-sdk-go/service/s3/internal/arn" -) - -const opAbortMultipartUpload = "AbortMultipartUpload" - -// AbortMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the AbortMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AbortMultipartUpload for more information on using the AbortMultipartUpload -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AbortMultipartUploadRequest method. -// req, resp := client.AbortMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload -func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { - op := &request.Operation{ - Name: opAbortMultipartUpload, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &AbortMultipartUploadInput{} - } - - output = &AbortMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// AbortMultipartUpload API operation for Amazon Simple Storage Service. -// -// This operation aborts a multipart upload. After a multipart upload is aborted, -// no additional parts can be uploaded using that upload ID. The storage consumed -// by any previously uploaded parts will be freed. However, if any part uploads -// are currently in progress, those part uploads might or might not succeed. -// As a result, it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the ListParts operation and ensure that -// the parts list is empty. -// -// For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// The following operations are related to AbortMultipartUpload: -// -// * CreateMultipartUpload -// -// * UploadPart -// -// * CompleteMultipartUpload -// -// * ListParts -// -// * ListMultipartUploads -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation AbortMultipartUpload for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchUpload "NoSuchUpload" -// The specified multipart upload does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload -func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - return out, req.Send() -} - -// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See AbortMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCompleteMultipartUpload = "CompleteMultipartUpload" - -// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CompleteMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CompleteMultipartUploadRequest method. -// req, resp := client.CompleteMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload -func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { - op := &request.Operation{ - Name: opCompleteMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CompleteMultipartUploadInput{} - } - - output = &CompleteMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// CompleteMultipartUpload API operation for Amazon Simple Storage Service. -// -// Completes a multipart upload by assembling previously uploaded parts. -// -// You first initiate the multipart upload and then upload all parts using the -// UploadPart operation. After successfully uploading all relevant parts of -// an upload, you call this operation to complete the upload. Upon receiving -// this request, Amazon S3 concatenates all the parts in ascending order by -// part number to create a new object. In the Complete Multipart Upload request, -// you must provide the parts list. 
You must ensure that the parts list is complete. -// This operation concatenates the parts that you provide in the list. For each -// part in the list, you must provide the part number and the ETag value, returned -// after that part was uploaded. -// -// Processing of a Complete Multipart Upload request could take several minutes -// to complete. After Amazon S3 begins processing the request, it sends an HTTP -// response header that specifies a 200 OK response. While processing is in -// progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. Because a request could fail after the initial -// 200 OK response has been sent, it is important that you check the response -// body to determine whether the request succeeded. -// -// Note that if CompleteMultipartUpload fails, applications should be prepared -// to retry the failed requests. For more information, see Amazon S3 Error Best -// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). -// -// For more information about multipart uploads, see Uploading Objects Using -// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). -// -// For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// GetBucketLifecycle has the following special errors: -// -// * Error code: EntityTooSmall Description: Your proposed upload is smaller -// than the minimum allowed object size. Each part must be at least 5 MB -// in size, except the last part. 400 Bad Request -// -// * Error code: InvalidPart Description: One or more of the specified parts -// could not be found. The part might not have been uploaded, or the specified -// entity tag might not have matched the part's entity tag. 400 Bad Request -// -// * Error code: InvalidPartOrder Description: The list of parts was not -// in ascending order. The parts list must be specified in order by part -// number. 400 Bad Request -// -// * Error code: NoSuchUpload Description: The specified multipart upload -// does not exist. The upload ID might be invalid, or the multipart upload -// might have been aborted or completed. 404 Not Found -// -// The following operations are related to DeleteBucketMetricsConfiguration: -// -// * CreateMultipartUpload -// -// * UploadPart -// -// * AbortMultipartUpload -// -// * ListParts -// -// * ListMultipartUploads -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CompleteMultipartUpload for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload -func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - return out, req.Send() -} - -// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See CompleteMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCopyObject = "CopyObject" - -// CopyObjectRequest generates a "aws/request.Request" representing the -// client's request for the CopyObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CopyObject for more information on using the CopyObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CopyObjectRequest method. -// req, resp := client.CopyObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject -func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { - op := &request.Operation{ - Name: opCopyObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CopyObjectInput{} - } - - output = &CopyObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// CopyObject API operation for Amazon Simple Storage Service. -// -// Creates a copy of an object that is already stored in Amazon S3. -// -// You can store individual objects of up to 5 TB in Amazon S3. You create a -// copy of your object up to 5 GB in size in a single atomic operation using -// this API. However, for copying an object greater than 5 GB, you must use -// the multipart upload Upload Part - Copy API. For more information, see Copy -// Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). -// -// When copying an object, you can preserve all metadata (default) or specify -// new metadata. However, the ACL is not preserved and is set to private for -// the user making the request. To override the default ACL setting, specify -// a new ACL when generating a copy request. For more information, see Using -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// -// Amazon S3 transfer acceleration does not support cross-region copies. If -// you request a cross-region copy using a transfer acceleration endpoint, you -// get a 400 Bad Request error. For more information about transfer acceleration, -// see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). -// -// All copy requests must be authenticated. Additionally, you must have read -// access to the source object and write access to the destination bucket. For -// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). 
-// Both the Region that you want to copy the object from and the Region that -// you want to copy the object to must be enabled for your account. -// -// To only copy an object under certain conditions, such as whether the Etag -// matches or whether the object was modified before or after a specified date, -// use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, -// x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since. -// -// All headers with the x-amz- prefix, including x-amz-copy-source, must be -// signed. -// -// You can use this operation to change the storage class of an object that -// is already stored in Amazon S3 using the StorageClass parameter. For more -// information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). -// -// The source object that you are copying can be encrypted or unencrypted. If -// the source object is encrypted, it can be encrypted by server-side encryption -// using AWS managed encryption keys or by using a customer-provided encryption -// key. When copying an object, you can request that Amazon S3 encrypt the target -// object by using either the AWS managed encryption keys or by using your own -// encryption key. You can do this regardless of the form of server-side encryption -// that was used to encrypt the source, or even if the source object was not -// encrypted. For more information about server-side encryption, see Using Server-Side -// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). -// -// A copy request might return an error when Amazon S3 receives the copy request -// or while Amazon S3 is copying the files. If the error occurs before the copy -// operation starts, you receive a standard Amazon S3 error. If the error occurs -// during the copy operation, the error response is embedded in the 200 OK response. -// This means that a 200 OK response can contain either a success or an error. -// Design your application to parse the contents of the response and handle -// it appropriately. -// -// If the copy is successful, you receive a response with information about -// the copied object. -// -// If the request is an HTTP 1.1 request, the response is chunk encoded. If -// it were not, it would not contain the content-length, and you would need -// to read the entire body. -// -// Consider the following when using request headers: -// -// * Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request and evaluate as follows, Amazon S3 -// returns 200 OK and copies the data: x-amz-copy-source-if-match condition -// evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates -// to false -// -// * Consideration 2 – If both of the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request -// and evaluate as follows, Amazon S3 returns the 412 Precondition Failed -// response code: x-amz-copy-source-if-none-match condition evaluates to -// false x-amz-copy-source-if-modified-since condition evaluates to true -// -// The copy request charge is based on the storage class and Region you specify -// for the destination object. For pricing information, see Amazon S3 Pricing -// (https://aws.amazon.com/s3/pricing/). 
-// -// Following are other considerations when using CopyObject: -// -// Versioning -// -// By default, x-amz-copy-source identifies the current version of an object -// to copy. (If the current version is a delete marker, Amazon S3 behaves as -// if the object was deleted.) To copy a different version, use the versionId -// subresource. -// -// If you enable versioning on the target bucket, Amazon S3 generates a unique -// version ID for the object being copied. This version ID is different from -// the version ID of the source object. Amazon S3 returns the version ID of -// the copied object in the x-amz-version-id response header in the response. -// -// If you do not enable versioning or suspend it on the target bucket, the version -// ID that Amazon S3 generates is always null. -// -// If the source object's storage class is GLACIER, you must restore a copy -// of this object before you can use it as a source object for the copy operation. -// For more information, see . -// -// Access Permissions -// -// When copying an object, you can optionally specify the accounts or groups -// that should be granted specific permissions on the new object. There are -// two ways to grant the permissions using the request headers: -// -// * Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters -// map to the set of permissions that Amazon S3 supports in an ACL. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// Server-Side- Encryption-Specific Request Headers -// -// To encrypt the target object, you must provide the appropriate encryption-related -// request headers. The one you use depends on whether you want to use AWS managed -// encryption keys or provide your own encryption key. -// -// * To encrypt the target object using server-side encryption with an AWS -// managed encryption key, provide the following request headers, as appropriate. -// x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id -// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, -// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon -// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want -// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id -// of the symmetric customer managed CMK. Amazon S3 only supports symmetric -// CMKs and not asymmetric CMKs. For more information, see Using Symmetric -// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the AWS Key Management Service Developer Guide. All GET and PUT requests -// for an object protected by AWS KMS fail if you don't make them with SSL -// or by using SigV4. For more information about server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with CMKs stored in KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). 
-// -// * To encrypt the target object using server-side encryption with an encryption -// key that you provide, use the following headers. x-amz-server-side​-encryption​-customer-algorithm -// x-amz-server-side​-encryption​-customer-key x-amz-server-side​-encryption​-customer-key-MD5 -// -// * If the source object is encrypted using server-side encryption with -// customer-provided encryption keys, you must use the following headers. -// x-amz-copy-source​-server-side​-encryption​-customer-algorithm x-amz-copy-source​-server-side​-encryption​-customer-key -// x-amz-copy-source-​server-side​-encryption​-customer-key-MD5 For -// more information about server-side encryption with CMKs stored in AWS -// KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs -// stored in Amazon KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// Access-Control-List (ACL)-Specific Request Headers -// -// You also can use the following access control–related headers with this -// operation. By default, all objects are private. Only the owner has full access -// control. When adding a new object, you can grant permissions to individual -// AWS accounts or to predefined groups defined by Amazon S3. These permissions -// are then added to the access control list (ACL) on the object. For more information, -// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// With this operation, you can grant access permissions using one of the following -// two methods: -// -// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees -// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly — To explicitly grant access -// permissions to specific AWS accounts or groups, use the following headers. -// Each header maps to specific permissions that Amazon S3 supports in an -// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// In the header, you specify a list of grantees who get the specific permission. -// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write -// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -// specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants the AWS -// accounts identified by email addresses permissions to read object data -// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" -// -// The following operations are related to CopyObject: -// -// * PutObject -// -// * GetObject -// -// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CopyObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" -// The source object of the COPY operation is not in the active tier and is -// only stored in Amazon S3 Glacier. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject -func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - return out, req.Send() -} - -// CopyObjectWithContext is the same as CopyObject with the addition of -// the ability to pass a context and additional request options. -// -// See CopyObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateBucket = "CreateBucket" - -// CreateBucketRequest generates a "aws/request.Request" representing the -// client's request for the CreateBucket operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateBucket for more information on using the CreateBucket -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateBucketRequest method. -// req, resp := client.CreateBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket -func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { - op := &request.Operation{ - Name: opCreateBucket, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &CreateBucketInput{} - } - - output = &CreateBucketOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateBucket API operation for Amazon Simple Storage Service. -// -// Creates a new bucket. To create a bucket, you must register with Amazon S3 -// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests -// are never allowed to create buckets. By creating the bucket, you become the -// bucket owner. -// -// Not every string is an acceptable bucket name. For information on bucket -// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). -// -// By default, the bucket is created in the US East (N. Virginia) Region. You -// can optionally specify a Region in the request body. You might choose a Region -// to optimize latency, minimize costs, or address regulatory requirements. 
-// For example, if you reside in Europe, you will probably find it advantageous -// to create buckets in the EU (Ireland) Region. For more information, see How -// to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). -// -// If you send your create bucket request to the s3.amazonaws.com endpoint, -// the request goes to the us-east-1 Region. Accordingly, the signature calculations -// in Signature Version 4 must use us-east-1 as the Region, even if the location -// constraint in the request specifies another Region where the bucket is to -// be created. If you create a bucket in a Region other than US East (N. Virginia), -// your application must be able to handle 307 redirect. For more information, -// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). -// -// When creating a bucket using this operation, you can optionally specify the -// accounts or groups that should be granted specific permissions on the bucket. -// There are two ways to grant the appropriate permissions using the request -// headers. -// -// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports -// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -// predefined set of grantees and permissions. For more information, see -// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, -// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control -// headers. These headers map to the set of permissions Amazon S3 supports -// in an ACL. For more information, see Access Control List (ACL) Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You -// specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants the AWS -// accounts identified by email addresses permissions to read object data -// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// The following operations are related to CreateBucket: -// -// * PutObject -// -// * DeleteBucket -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CreateBucket for usage and error information. -// -// Returned Error Codes: -// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" -// The requested bucket name is not available. The bucket namespace is shared -// by all users of the system. Please select a different name and try again. -// -// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" -// The bucket you tried to create already exists, and you own it. Amazon S3 -// returns this error in all AWS Regions except in the North Virginia Region. 
-// For legacy compatibility, if you re-create an existing bucket that you already -// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the -// bucket access control lists (ACLs). -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket -func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - return out, req.Send() -} - -// CreateBucketWithContext is the same as CreateBucket with the addition of -// the ability to pass a context and additional request options. -// -// See CreateBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateMultipartUpload = "CreateMultipartUpload" - -// CreateMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CreateMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateMultipartUpload for more information on using the CreateMultipartUpload -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateMultipartUploadRequest method. -// req, resp := client.CreateMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload -func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { - op := &request.Operation{ - Name: opCreateMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?uploads", - } - - if input == nil { - input = &CreateMultipartUploadInput{} - } - - output = &CreateMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateMultipartUpload API operation for Amazon Simple Storage Service. -// -// This operation initiates a multipart upload and returns an upload ID. This -// upload ID is used to associate all of the parts in the specific multipart -// upload. You specify this upload ID in each of your subsequent upload part -// requests (see UploadPart). You also include this upload ID in the final request -// to either complete or abort the multipart upload request. -// -// For more information about multipart uploads, see Multipart Upload Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). -// -// If you have configured a lifecycle rule to abort incomplete multipart uploads, -// the upload must complete within the number of days specified in the bucket -// lifecycle configuration. 
Otherwise, the incomplete multipart upload becomes -// eligible for an abort operation and Amazon S3 aborts the multipart upload. -// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). -// -// For information about the permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// For request signing, multipart upload is just a series of regular requests. -// You initiate a multipart upload, send one or more requests to upload parts, -// and then complete the multipart upload process. You sign each request individually. -// There is nothing special about signing multipart upload requests. For more -// information about signing, see Authenticating Requests (AWS Signature Version -// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). -// -// After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or -// abort the multipart upload. Amazon S3 frees up the space used to store the -// parts and stop charging you for storing them only after you either complete -// or abort a multipart upload. -// -// You can optionally request server-side encryption. For server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. You can provide your own encryption key, -// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or -// Amazon S3-managed encryption keys. If you choose to provide your own encryption -// key, the request headers you provide in UploadPart) and UploadPartCopy) requests -// must match the headers you used in the request to initiate the upload by -// using CreateMultipartUpload. -// -// To perform a multipart upload with encryption using an AWS KMS CMK, the requester -// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, -// and kms:DescribeKey actions on the key. These permissions are required because -// Amazon S3 must decrypt and read data from the encrypted file parts before -// it completes the multipart upload. -// -// If your AWS Identity and Access Management (IAM) user or role is in the same -// AWS account as the AWS KMS CMK, then you must have these permissions on the -// key policy. If your IAM user or role belongs to a different account than -// the key, then you must have the permissions on both the key policy and your -// IAM user or role. -// -// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). -// -// Access Permissions -// -// When copying an object, you can optionally specify the accounts or groups -// that should be granted specific permissions on the new object. There are -// two ways to grant the permissions using the request headers: -// -// * Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. 
These parameters -// map to the set of permissions that Amazon S3 supports in an ACL. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// Server-Side- Encryption-Specific Request Headers -// -// You can optionally tell Amazon S3 to encrypt data at rest using server-side -// encryption. Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. The option you use depends on whether you want to -// use AWS managed encryption keys or provide your own encryption key. -// -// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) -// stored in AWS Key Management Service (AWS KMS) – If you want AWS to -// manage the keys used to encrypt data, specify the following headers in -// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id -// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, -// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon -// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and -// PUT requests for an object protected by AWS KMS fail if you don't make -// them with SSL or by using SigV4. For more information about server-side -// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data -// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// * Use customer-provided encryption keys – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key -// x-amz-server-side​-encryption​-customer-key-MD5 For more information -// about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see -// Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// Access-Control-List (ACL)-Specific Request Headers -// -// You also can use the following access control–related headers with this -// operation. By default, all objects are private. Only the owner has full access -// control. When adding a new object, you can grant permissions to individual -// AWS accounts or to predefined groups defined by Amazon S3. These permissions -// are then added to the access control list (ACL) on the object. For more information, -// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// With this operation, you can grant access permissions using one of the following -// two methods: -// -// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees -// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly — To explicitly grant access -// permissions to specific AWS accounts or groups, use the following headers. -// Each header maps to specific permissions that Amazon S3 supports in an -// ACL. 
For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// In the header, you specify a list of grantees who get the specific permission. -// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write -// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -// specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants the AWS -// accounts identified by email addresses permissions to read object data -// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" -// -// The following operations are related to CreateMultipartUpload: -// -// * UploadPart -// -// * CompleteMultipartUpload -// -// * AbortMultipartUpload -// -// * ListParts -// -// * ListMultipartUploads -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CreateMultipartUpload for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload -func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - return out, req.Send() -} - -// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See CreateMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucket = "DeleteBucket" - -// DeleteBucketRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucket operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucket for more information on using the DeleteBucket -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketRequest method. 
-// req, resp := client.DeleteBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket -func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { - op := &request.Operation{ - Name: opDeleteBucket, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &DeleteBucketInput{} - } - - output = &DeleteBucketOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucket API operation for Amazon Simple Storage Service. -// -// Deletes the bucket. All objects (including all object versions and delete -// markers) in the bucket must be deleted before the bucket itself can be deleted. -// -// Related Resources -// -// * -// -// * -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucket for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket -func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - return out, req.Send() -} - -// DeleteBucketWithContext is the same as DeleteBucket with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" - -// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
-// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration -func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketAnalyticsConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &DeleteBucketAnalyticsConfigurationInput{} - } - - output = &DeleteBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes an analytics configuration for the bucket (specified by the analytics -// configuration ID). -// -// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics -// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). -// -// The following operations are related to DeleteBucketAnalyticsConfiguration: -// -// * -// -// * -// -// * -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketAnalyticsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration -func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { - req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { - req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteBucketCors = "DeleteBucketCors" - -// DeleteBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketCors operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketCors for more information on using the DeleteBucketCors -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketCorsRequest method. -// req, resp := client.DeleteBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors -func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { - op := &request.Operation{ - Name: opDeleteBucketCors, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &DeleteBucketCorsInput{} - } - - output = &DeleteBucketCorsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketCors API operation for Amazon Simple Storage Service. -// -// Deletes the cors configuration information set for the bucket. -// -// To use this operation, you must have permission to perform the s3:PutBucketCORS -// action. The bucket owner has this permission by default and can grant this -// permission to others. -// -// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Related Resources: -// -// * -// -// * RESTOPTIONSobject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketCors for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors -func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - return out, req.Send() -} - -// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketCors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteBucketEncryption = "DeleteBucketEncryption" - -// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketEncryptionRequest method. -// req, resp := client.DeleteBucketEncryptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption -func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { - op := &request.Operation{ - Name: opDeleteBucketEncryption, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?encryption", - } - - if input == nil { - input = &DeleteBucketEncryptionInput{} - } - - output = &DeleteBucketEncryptionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketEncryption API operation for Amazon Simple Storage Service. -// -// This implementation of the DELETE operation removes default encryption from -// the bucket. For information about the Amazon S3 default encryption feature, -// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev//bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Related Resources -// -// * PutBucketEncryption -// -// * GetBucketEncryption -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketEncryption for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption -func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { - req, out := c.DeleteBucketEncryptionRequest(input) - return out, req.Send() -} - -// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketEncryption for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { - req, out := c.DeleteBucketEncryptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" - -// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. -// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration -func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketInventoryConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &DeleteBucketInventoryConfigurationInput{} - } - - output = &DeleteBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes an inventory configuration (identified by the inventory ID) from -// the bucket. -// -// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. 
For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). -// -// Operations related to DeleteBucketInventoryConfiguration include: -// -// * GetBucketInventoryConfiguration -// -// * PutBucketInventoryConfiguration -// -// * ListBucketInventoryConfigurations -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketInventoryConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration -func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { - req, out := c.DeleteBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { - req, out := c.DeleteBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketLifecycle = "DeleteBucketLifecycle" - -// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketLifecycleRequest method. 
-// req, resp := client.DeleteBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle -func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { - op := &request.Operation{ - Name: opDeleteBucketLifecycle, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &DeleteBucketLifecycleInput{} - } - - output = &DeleteBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. -// -// Deletes the lifecycle configuration from the specified bucket. Amazon S3 -// removes all the lifecycle configuration rules in the lifecycle subresource -// associated with the bucket. Your objects never expire, and Amazon S3 no longer -// automatically deletes any objects on the basis of rules contained in the -// deleted lifecycle configuration. -// -// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration -// action. By default, the bucket owner has this permission and the bucket owner -// can grant this permission to others. -// -// There is usually some time lag before lifecycle configuration deletion is -// fully propagated to all the Amazon S3 systems. -// -// For more information about the object expiration, see Elements to Describe -// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). -// -// Related actions include: -// -// * PutBucketLifecycleConfiguration -// -// * GetBucketLifecycleConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketLifecycle for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle -func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - return out, req.Send() -} - -// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" - -// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. -// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration -func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketMetricsConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &DeleteBucketMetricsConfigurationInput{} - } - - output = &DeleteBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes a metrics configuration for the Amazon CloudWatch request metrics -// (specified by the metrics configuration ID) from the bucket. Note that this -// doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// -// The following operations are related to DeleteBucketMetricsConfiguration: -// -// * GetBucketMetricsConfiguration -// -// * PutBucketMetricsConfiguration -// -// * ListBucketMetricsConfigurations -// -// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketMetricsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration -func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { - req, out := c.DeleteBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketMetricsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { - req, out := c.DeleteBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketPolicy = "DeleteBucketPolicy" - -// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketPolicyRequest method. -// req, resp := client.DeleteBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy -func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { - op := &request.Operation{ - Name: opDeleteBucketPolicy, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &DeleteBucketPolicyInput{} - } - - output = &DeleteBucketPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketPolicy API operation for Amazon Simple Storage Service. -// -// This implementation of the DELETE operation uses the policy subresource to -// delete the policy of a specified bucket. If you are using an identity other -// than the root user of the AWS account that owns the bucket, the calling identity -// must have the DeleteBucketPolicy permissions on the specified bucket and -// belong to the bucket owner's account to use this operation. -// -// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 -// Access Denied error. 
If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. -// -// As a security precaution, the root user of the AWS account that owns a bucket -// can always use this operation, even if the policy explicitly denies the root -// user the ability to perform this action. -// -// For more information about bucket policies, see Using Bucket Policies and -// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). -// -// The following operations are related to DeleteBucketPolicy -// -// * CreateBucket -// -// * DeleteObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketPolicy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy -func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { - req, out := c.DeleteBucketPolicyRequest(input) - return out, req.Send() -} - -// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { - req, out := c.DeleteBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketReplication = "DeleteBucketReplication" - -// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketReplication operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketReplication for more information on using the DeleteBucketReplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketReplicationRequest method. 
-// req, resp := client.DeleteBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication -func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { - op := &request.Operation{ - Name: opDeleteBucketReplication, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &DeleteBucketReplicationInput{} - } - - output = &DeleteBucketReplicationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketReplication API operation for Amazon Simple Storage Service. -// -// Deletes the replication configuration from the bucket. -// -// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration -// action. The bucket owner has these permissions by default and can grant it -// to others. For more information about permissions, see Permissions Related -// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// It can take a while for the deletion of a replication configuration to fully -// propagate. -// -// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 Developer Guide. -// -// The following operations are related to DeleteBucketReplication: -// -// * PutBucketReplication -// -// * GetBucketReplication -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketReplication for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication -func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { - req, out := c.DeleteBucketReplicationRequest(input) - return out, req.Send() -} - -// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketReplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { - req, out := c.DeleteBucketReplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteBucketTagging = "DeleteBucketTagging" - -// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketTagging for more information on using the DeleteBucketTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketTaggingRequest method. -// req, resp := client.DeleteBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging -func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { - op := &request.Operation{ - Name: opDeleteBucketTagging, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &DeleteBucketTaggingInput{} - } - - output = &DeleteBucketTaggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketTagging API operation for Amazon Simple Storage Service. -// -// Deletes the tags from the bucket. -// -// To use this operation, you must have permission to perform the s3:PutBucketTagging -// action. By default, the bucket owner has this permission and can grant this -// permission to others. -// -// The following operations are related to DeleteBucketTagging: -// -// * GetBucketTagging -// -// * PutBucketTagging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging -func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - return out, req.Send() -} - -// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteBucketWebsite = "DeleteBucketWebsite" - -// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketWebsite operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteBucketWebsiteRequest method. -// req, resp := client.DeleteBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite -func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { - op := &request.Operation{ - Name: opDeleteBucketWebsite, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &DeleteBucketWebsiteInput{} - } - - output = &DeleteBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketWebsite API operation for Amazon Simple Storage Service. -// -// This operation removes the website configuration for a bucket. Amazon S3 -// returns a 200 OK response upon successfully deleting a website configuration -// on the specified bucket. You will get a 200 OK response if the website configuration -// you are trying to delete does not exist on the bucket. Amazon S3 returns -// a 404 response if the bucket specified in the request does not exist. -// -// This DELETE operation requires the S3:DeleteBucketWebsite permission. By -// default, only the bucket owner can delete the website configuration attached -// to a bucket. However, bucket owners can grant other users permission to delete -// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite -// permission. -// -// For more information about hosting websites, see Hosting Websites on Amazon -// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). -// -// The following operations are related to DeleteBucketWebsite: -// -// * GetBucketWebsite -// -// * PutBucketWebsite -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketWebsite for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite -func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - return out, req.Send() -} - -// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of -// the ability to pass a context and additional request options. 
-// -// See DeleteBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObject = "DeleteObject" - -// DeleteObjectRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteObject for more information on using the DeleteObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteObjectRequest method. -// req, resp := client.DeleteObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject -func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { - op := &request.Operation{ - Name: opDeleteObject, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &DeleteObjectInput{} - } - - output = &DeleteObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObject API operation for Amazon Simple Storage Service. -// -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects. -// -// To remove a specific version, you must be the bucket owner and you must use -// the version Id subresource. Using this subresource permanently deletes the -// version. If the object deleted is a delete marker, Amazon S3 sets the response -// header, x-amz-delete-marker, to true. -// -// If the object you want to delete is in a bucket where the bucket versioning -// configuration is MFA Delete enabled, you must include the x-amz-mfa request -// header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. -// -// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). -// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). -// -// You can delete objects by explicitly calling the DELETE Object API or configure -// its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for -// you. 
If you want to block users or accounts from removing or deleting objects -// from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, -// and s3:PutLifeCycleConfiguration actions. -// -// The following operation is related to DeleteObject: -// -// * PutObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObject for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject -func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - return out, req.Send() -} - -// DeleteObjectWithContext is the same as DeleteObject with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObjectTagging = "DeleteObjectTagging" - -// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObjectTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteObjectTagging for more information on using the DeleteObjectTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteObjectTaggingRequest method. -// req, resp := client.DeleteObjectTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging -func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { - op := &request.Operation{ - Name: opDeleteObjectTagging, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}?tagging", - } - - if input == nil { - input = &DeleteObjectTaggingInput{} - } - - output = &DeleteObjectTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObjectTagging API operation for Amazon Simple Storage Service. -// -// Removes the entire tag set from the specified object. For more information -// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). -// -// To use this operation, you must have permission to perform the s3:DeleteObjectTagging -// action. 
-// -// To delete tags of a specific object version, add the versionId query parameter -// in the request. You will need permission for the s3:DeleteObjectVersionTagging -// action. -// -// The following operations are related to DeleteBucketMetricsConfiguration: -// -// * PutObjectTagging -// -// * GetObjectTagging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging -func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { - req, out := c.DeleteObjectTaggingRequest(input) - return out, req.Send() -} - -// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { - req, out := c.DeleteObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObjects = "DeleteObjects" - -// DeleteObjectsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObjects operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteObjects for more information on using the DeleteObjects -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteObjectsRequest method. -// req, resp := client.DeleteObjectsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects -func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { - op := &request.Operation{ - Name: opDeleteObjects, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}?delete", - } - - if input == nil { - input = &DeleteObjectsInput{} - } - - output = &DeleteObjectsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObjects API operation for Amazon Simple Storage Service. -// -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. If you know the object keys that you want to delete, -// then this operation provides a suitable alternative to sending individual -// delete requests, reducing per-request overhead. 
-// -// The request contains a list of up to 1000 keys that you want to delete. In -// the XML, you provide the object key names, and optionally, version IDs if -// you want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete operation and returns the -// result of that delete, success, or failure, in the response. Note that if -// the object specified in the request is not found, Amazon S3 returns the result -// as deleted. -// -// The operation supports two modes for the response: verbose and quiet. By -// default, the operation uses verbose mode in which the response includes the -// result of deletion of each key in your request. In quiet mode the response -// includes only keys where the delete operation encountered an error. For a -// successful deletion, the operation does not return any information about -// the delete in the response body. -// -// When performing this operation on an MFA Delete enabled bucket, that attempts -// to delete any versioned objects, you must include an MFA token. If you do -// not provide one, the entire request will fail, even if there are non-versioned -// objects you are trying to delete. If you provide an invalid token, whether -// there are versioned keys in the request or not, the entire Multi-Object Delete -// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). -// -// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. -// Amazon S3 uses the header value to ensure that your request body has not -// been altered in transit. -// -// The following operations are related to DeleteObjects: -// -// * CreateMultipartUpload -// -// * UploadPart -// -// * CompleteMultipartUpload -// -// * ListParts -// -// * AbortMultipartUpload -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObjects for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects -func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - return out, req.Send() -} - -// DeleteObjectsWithContext is the same as DeleteObjects with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObjects for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeletePublicAccessBlock = "DeletePublicAccessBlock" - -// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the DeletePublicAccessBlock operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeletePublicAccessBlockRequest method. -// req, resp := client.DeletePublicAccessBlockRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock -func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { - op := &request.Operation{ - Name: opDeletePublicAccessBlock, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?publicAccessBlock", - } - - if input == nil { - input = &DeletePublicAccessBlockInput{} - } - - output = &DeletePublicAccessBlockOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. -// -// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:PutBucketPublicAccessBlock permission. -// For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// The following operations are related to DeleteBucketMetricsConfiguration: -// -// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// * GetPublicAccessBlock -// -// * PutPublicAccessBlock -// -// * GetBucketPolicyStatus -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeletePublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock -func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { - req, out := c.DeletePublicAccessBlockRequest(input) - return out, req.Send() -} - -// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of -// the ability to pass a context and additional request options. -// -// See DeletePublicAccessBlock for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { - req, out := c.DeletePublicAccessBlockRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" - -// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. -// req, resp := client.GetBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration -func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketAccelerateConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &GetBucketAccelerateConfigurationInput{} - } - - output = &GetBucketAccelerateConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. -// -// This implementation of the GET operation uses the accelerate subresource -// to return the Transfer Acceleration state of a bucket, which is either Enabled -// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that -// enables you to perform faster data transfers to and from Amazon S3. -// -// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// You set the Transfer Acceleration state of an existing bucket to Enabled -// or Suspended by using the PutBucketAccelerateConfiguration operation. -// -// A GET accelerate request does not return a state value for a bucket that -// has no transfer acceleration state. A bucket has no Transfer Acceleration -// state if a state has never been set on the bucket. 
-// -// For more information about transfer acceleration, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev//transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Related Resources -// -// * PutBucketAccelerateConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAccelerateConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration -func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAccelerateConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAcl = "GetBucketAcl" - -// GetBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketAcl for more information on using the GetBucketAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketAclRequest method. -// req, resp := client.GetBucketAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl -func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { - op := &request.Operation{ - Name: opGetBucketAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &GetBucketAclInput{} - } - - output = &GetBucketAclOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAcl API operation for Amazon Simple Storage Service. -// -// This implementation of the GET operation uses the acl subresource to return -// the access control list (ACL) of a bucket. 
To use GET to return the ACL of -// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission -// is granted to the anonymous user, you can return the ACL of the bucket without -// using an authorization header. -// -// Related Resources -// -// * -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAcl for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl -func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - return out, req.Send() -} - -// GetBucketAclWithContext is the same as GetBucketAcl with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" - -// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. -// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration -func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketAnalyticsConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &GetBucketAnalyticsConfigurationInput{} - } - - output = &GetBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// This implementation of the GET operation returns an analytics configuration -// (identified by the analytics configuration ID) from the bucket. 
-// -// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// For information about Amazon S3 analytics feature, see Amazon S3 Analytics -// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Related Resources -// -// * -// -// * -// -// * -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAnalyticsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration -func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { - req, out := c.GetBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { - req, out := c.GetBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketCors = "GetBucketCors" - -// GetBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketCors operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketCors for more information on using the GetBucketCors -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketCorsRequest method. 
-// req, resp := client.GetBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors -func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { - op := &request.Operation{ - Name: opGetBucketCors, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &GetBucketCorsInput{} - } - - output = &GetBucketCorsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketCors API operation for Amazon Simple Storage Service. -// -// Returns the cors configuration information set for the bucket. -// -// To use this operation, you must have permission to perform the s3:GetBucketCORS -// action. By default, the bucket owner has this permission and can grant it -// to others. -// -// For more information about cors, see Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). -// -// The following operations are related to GetBucketCors: -// -// * PutBucketCors -// -// * DeleteBucketCors -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketCors for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors -func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - return out, req.Send() -} - -// GetBucketCorsWithContext is the same as GetBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketCors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketEncryption = "GetBucketEncryption" - -// GetBucketEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketEncryption for more information on using the GetBucketEncryption -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketEncryptionRequest method. 
-// req, resp := client.GetBucketEncryptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption -func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { - op := &request.Operation{ - Name: opGetBucketEncryption, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?encryption", - } - - if input == nil { - input = &GetBucketEncryptionInput{} - } - - output = &GetBucketEncryptionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketEncryption API operation for Amazon Simple Storage Service. -// -// Returns the default encryption configuration for an Amazon S3 bucket. For -// information about the Amazon S3 default encryption feature, see Amazon S3 -// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). -// -// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// The following operations are related to GetBucketEncryption: -// -// * PutBucketEncryption -// -// * DeleteBucketEncryption -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketEncryption for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption -func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { - req, out := c.GetBucketEncryptionRequest(input) - return out, req.Send() -} - -// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketEncryption for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { - req, out := c.GetBucketEncryptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" - -// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketInventoryConfigurationRequest method. -// req, resp := client.GetBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration -func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketInventoryConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &GetBucketInventoryConfigurationInput{} - } - - output = &GetBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// Returns an inventory configuration (identified by the inventory configuration -// ID) from the bucket. -// -// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). -// -// The following operations are related to GetBucketInventoryConfiguration: -// -// * DeleteBucketInventoryConfiguration -// -// * ListBucketInventoryConfigurations -// -// * PutBucketInventoryConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketInventoryConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration -func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { - req, out := c.GetBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { - req, out := c.GetBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLifecycle = "GetBucketLifecycle" - -// GetBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLifecycle for more information on using the GetBucketLifecycle -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketLifecycleRequest method. -// req, resp := client.GetBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle -// -// Deprecated: GetBucketLifecycle has been deprecated -func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketLifecycle, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleInput{} - } - - output = &GetBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLifecycle API operation for Amazon Simple Storage Service. -// -// -// For an updated version of this API, see GetBucketLifecycleConfiguration. -// If you configured a bucket lifecycle using the filter element, you should -// see the updated version of this topic. This topic is provided for backward -// compatibility. -// -// Returns the lifecycle configuration information set on the bucket. For information -// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). -// -// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// GetBucketLifecycle has the following special error: -// -// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle -// configuration does not exist. 
HTTP Status Code: 404 Not Found SOAP Fault -// Code Prefix: Client -// -// The following operations are related to GetBucketLifecycle: -// -// * GetBucketLifecycleConfiguration -// -// * PutBucketLifecycle -// -// * DeleteBucketLifecycle -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLifecycle for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle -// -// Deprecated: GetBucketLifecycle has been deprecated -func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - return out, req.Send() -} - -// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -// -// Deprecated: GetBucketLifecycleWithContext has been deprecated -func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" - -// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. -// req, resp := client.GetBucketLifecycleConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration -func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketLifecycleConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleConfigurationInput{} - } - - output = &GetBucketLifecycleConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. 
-// -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The response describes -// the new filter element that you can use to specify a filter to select a subset -// of objects to which the rule applies. If you are still using previous version -// of the lifecycle configuration, it works. For the earlier API description, -// see GetBucketLifecycle. -// -// Returns the lifecycle configuration information set on the bucket. For information -// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). -// -// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration -// action. The bucket owner has this permission, by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// GetBucketLifecycleConfiguration has the following special error: -// -// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle -// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault -// Code Prefix: Client -// -// The following operations are related to DeleteBucketMetricsConfiguration: -// -// * GetBucketLifecycle -// -// * PutBucketLifecycle -// -// * DeleteBucketLifecycle -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLifecycleConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration -func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLifecycleConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLocation = "GetBucketLocation" - -// GetBucketLocationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLocation operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLocation for more information on using the GetBucketLocation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketLocationRequest method. -// req, resp := client.GetBucketLocationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { - op := &request.Operation{ - Name: opGetBucketLocation, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?location", - } - - if input == nil { - input = &GetBucketLocationInput{} - } - - output = &GetBucketLocationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLocation API operation for Amazon Simple Storage Service. -// -// Returns the Region the bucket resides in. You set the bucket's Region using -// the LocationConstraint request parameter in a CreateBucket request. For more -// information, see CreateBucket. -// -// To use this implementation of the operation, you must be the bucket owner. -// -// The following operations are related to GetBucketLocation: -// -// * GetObject -// -// * CreateBucket -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLocation for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - return out, req.Send() -} - -// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLocation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLogging = "GetBucketLogging" - -// GetBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLogging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLogging for more information on using the GetBucketLogging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketLoggingRequest method. -// req, resp := client.GetBucketLoggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging -func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { - op := &request.Operation{ - Name: opGetBucketLogging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?logging", - } - - if input == nil { - input = &GetBucketLoggingInput{} - } - - output = &GetBucketLoggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLogging API operation for Amazon Simple Storage Service. -// -// Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. -// -// The following operations are related to GetBucketLogging: -// -// * CreateBucket -// -// * PutBucketLogging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLogging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging -func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - return out, req.Send() -} - -// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLogging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" - -// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketMetricsConfigurationRequest method. -// req, resp := client.GetBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration -func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketMetricsConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &GetBucketMetricsConfigurationInput{} - } - - output = &GetBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Gets a metrics configuration (specified by the metrics configuration ID) -// from the bucket. Note that this doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// -// The following operations are related to GetBucketMetricsConfiguration: -// -// * PutBucketMetricsConfiguration -// -// * DeleteBucketMetricsConfiguration -// -// * ListBucketMetricsConfigurations -// -// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketMetricsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration -func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { - req, out := c.GetBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketMetricsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { - req, out := c.GetBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketNotification = "GetBucketNotification" - -// GetBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotification operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketNotification for more information on using the GetBucketNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketNotificationRequest method. -// req, resp := client.GetBucketNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification -// -// Deprecated: GetBucketNotification has been deprecated -func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketNotification, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - output = &NotificationConfigurationDeprecated{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketNotification API operation for Amazon Simple Storage Service. -// -// No longer used, see GetBucketNotificationConfiguration. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketNotification for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification -// -// Deprecated: GetBucketNotification has been deprecated -func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - return out, req.Send() -} - -// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-// -// Deprecated: GetBucketNotificationWithContext has been deprecated -func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" - -// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketNotificationConfigurationRequest method. -// req, resp := client.GetBucketNotificationConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration -func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { - op := &request.Operation{ - Name: opGetBucketNotificationConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - output = &NotificationConfiguration{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. -// -// Returns the notification configuration of a bucket. -// -// If notifications are not enabled on the bucket, the operation returns an -// empty NotificationConfiguration element. -// -// By default, you must be the bucket owner to read the notification configuration -// of a bucket. However, the bucket owner can use a bucket policy to grant permission -// to other users to read this configuration with the s3:GetBucketNotification -// permission. -// -// For more information about setting and reading the notification configuration -// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). -// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). -// -// The following operation is related to GetBucketNotification: -// -// * PutBucketNotification -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketNotificationConfiguration for usage and error information. 
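As an illustrative sketch of the non-deprecated GetBucketNotificationConfiguration call described here (bucket name is a caller-supplied placeholder), the returned configuration can be walked to list the SNS, SQS, and Lambda destinations:

```go
package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listNotificationTargets prints the notification destinations configured on
// a bucket; an empty configuration means notifications are not enabled.
func listNotificationTargets(svc *s3.S3, bucket string) error {
	out, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, tc := range out.TopicConfigurations {
		fmt.Println("SNS topic:", aws.StringValue(tc.TopicArn), aws.StringValueSlice(tc.Events))
	}
	for _, qc := range out.QueueConfigurations {
		fmt.Println("SQS queue:", aws.StringValue(qc.QueueArn), aws.StringValueSlice(qc.Events))
	}
	for _, lc := range out.LambdaFunctionConfigurations {
		fmt.Println("Lambda:", aws.StringValue(lc.LambdaFunctionArn), aws.StringValueSlice(lc.Events))
	}
	return nil
}
```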
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration -func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketNotificationConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketPolicy = "GetBucketPolicy" - -// GetBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketPolicy for more information on using the GetBucketPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketPolicyRequest method. -// req, resp := client.GetBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy -func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { - op := &request.Operation{ - Name: opGetBucketPolicy, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &GetBucketPolicyInput{} - } - - output = &GetBucketPolicyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketPolicy API operation for Amazon Simple Storage Service. -// -// Returns the policy of a specified bucket. If you are using an identity other -// than the root user of the AWS account that owns the bucket, the calling identity -// must have the GetBucketPolicy permissions on the specified bucket and belong -// to the bucket owner's account in order to use this operation. -// -// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a -// 405 Method Not Allowed error. -// -// As a security precaution, the root user of the AWS account that owns a bucket -// can always use this operation, even if the policy explicitly denies the root -// user the ability to perform this action. 
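A hedged sketch of calling GetBucketPolicy and unpacking a service failure with awserr, along the lines of the error-handling note above (bucket name is a placeholder):

```go
package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printBucketPolicy fetches and prints the bucket policy document, surfacing
// the service error code (for example the 403 or 405 cases described above)
// when the call fails.
func printBucketPolicy(svc *s3.S3, bucket string) {
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{Bucket: aws.String(bucket)})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Printf("GetBucketPolicy failed: %s: %s\n", aerr.Code(), aerr.Message())
		} else {
			fmt.Println("GetBucketPolicy failed:", err)
		}
		return
	}
	fmt.Println(aws.StringValue(out.Policy))
}
```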
-// -// For more information about bucket policies, see Using Bucket Policies and -// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). -// -// The following operation is related to GetBucketPolicy: -// -// * GetObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketPolicy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy -func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - return out, req.Send() -} - -// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketPolicyStatus = "GetBucketPolicyStatus" - -// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketPolicyStatus operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketPolicyStatusRequest method. -// req, resp := client.GetBucketPolicyStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus -func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { - op := &request.Operation{ - Name: opGetBucketPolicyStatus, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policyStatus", - } - - if input == nil { - input = &GetBucketPolicyStatusInput{} - } - - output = &GetBucketPolicyStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. -// -// Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus -// permission. 
For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// -// For more information about when Amazon S3 considers a bucket public, see -// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). -// -// The following operations are related to GetBucketPolicyStatus: -// -// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// * GetPublicAccessBlock -// -// * PutPublicAccessBlock -// -// * DeletePublicAccessBlock -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketPolicyStatus for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus -func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { - req, out := c.GetBucketPolicyStatusRequest(input) - return out, req.Send() -} - -// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketPolicyStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { - req, out := c.GetBucketPolicyStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketReplication = "GetBucketReplication" - -// GetBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketReplication operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketReplication for more information on using the GetBucketReplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketReplicationRequest method. 
-// req, resp := client.GetBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication -func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { - op := &request.Operation{ - Name: opGetBucketReplication, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &GetBucketReplicationInput{} - } - - output = &GetBucketReplicationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketReplication API operation for Amazon Simple Storage Service. -// -// Returns the replication configuration of a bucket. -// -// It can take a while to propagate the put or delete a replication configuration -// to all Amazon S3 systems. Therefore, a get request soon after put or delete -// can return a wrong result. -// -// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// This operation requires permissions for the s3:GetReplicationConfiguration -// action. For more information about permissions, see Using Bucket Policies -// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). -// -// If you include the Filter element in a replication configuration, you must -// also include the DeleteMarkerReplication and Priority elements. The response -// also returns those elements. -// -// For information about GetBucketReplication errors, see ReplicationErrorCodeList -// -// The following operations are related to GetBucketReplication: -// -// * PutBucketReplication -// -// * DeleteBucketReplication -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketReplication for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication -func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { - req, out := c.GetBucketReplicationRequest(input) - return out, req.Send() -} - -// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketReplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { - req, out := c.GetBucketReplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketRequestPayment = "GetBucketRequestPayment" - -// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketRequestPayment operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketRequestPaymentRequest method. -// req, resp := client.GetBucketRequestPaymentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment -func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { - op := &request.Operation{ - Name: opGetBucketRequestPayment, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &GetBucketRequestPaymentInput{} - } - - output = &GetBucketRequestPaymentOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketRequestPayment API operation for Amazon Simple Storage Service. -// -// Returns the request payment configuration of a bucket. To use this version -// of the operation, you must be the bucket owner. For more information, see -// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). -// -// The following operations are related to GetBucketRequestPayment: -// -// * ListObjects -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketRequestPayment for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment -func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - return out, req.Send() -} - -// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketRequestPayment for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketTagging = "GetBucketTagging" - -// GetBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketTagging operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketTagging for more information on using the GetBucketTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketTaggingRequest method. -// req, resp := client.GetBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging -func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { - op := &request.Operation{ - Name: opGetBucketTagging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &GetBucketTaggingInput{} - } - - output = &GetBucketTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketTagging API operation for Amazon Simple Storage Service. -// -// Returns the tag set associated with the bucket. -// -// To use this operation, you must have permission to perform the s3:GetBucketTagging -// action. By default, the bucket owner has this permission and can grant this -// permission to others. -// -// GetBucketTagging has the following special error: -// -// * Error code: NoSuchTagSetError Description: There is no tag set associated -// with the bucket. -// -// The following operations are related to GetBucketTagging: -// -// * PutBucketTagging -// -// * DeleteBucketTagging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging -func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - return out, req.Send() -} - -// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketVersioning = "GetBucketVersioning" - -// GetBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketVersioning operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketVersioning for more information on using the GetBucketVersioning -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketVersioningRequest method. -// req, resp := client.GetBucketVersioningRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning -func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { - op := &request.Operation{ - Name: opGetBucketVersioning, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &GetBucketVersioningInput{} - } - - output = &GetBucketVersioningOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketVersioning API operation for Amazon Simple Storage Service. -// -// Returns the versioning state of a bucket. -// -// To retrieve the versioning state of a bucket, you must be the bucket owner. -// -// This implementation also returns the MFA Delete status of the versioning -// state. If the MFA Delete status is enabled, the bucket owner must use an -// authentication device to change the versioning state of the bucket. -// -// The following operations are related to GetBucketVersioning: -// -// * GetObject -// -// * PutObject -// -// * DeleteObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketVersioning for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning -func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - return out, req.Send() -} - -// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketVersioning for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketWebsite = "GetBucketWebsite" - -// GetBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketWebsite operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketWebsite for more information on using the GetBucketWebsite -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetBucketWebsiteRequest method. -// req, resp := client.GetBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite -func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { - op := &request.Operation{ - Name: opGetBucketWebsite, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &GetBucketWebsiteInput{} - } - - output = &GetBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketWebsite API operation for Amazon Simple Storage Service. -// -// Returns the website configuration for a bucket. To host website on Amazon -// S3, you can configure a bucket as website by adding a website configuration. -// For more information about hosting websites, see Hosting Websites on Amazon -// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). -// -// This GET operation requires the S3:GetBucketWebsite permission. By default, -// only the bucket owner can read the bucket website configuration. However, -// bucket owners can allow other users to read the website configuration by -// writing a bucket policy granting them the S3:GetBucketWebsite permission. -// -// The following operations are related to DeleteBucketWebsite: -// -// * DeleteBucketWebsite -// -// * PutBucketWebsite -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketWebsite for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite -func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - return out, req.Send() -} - -// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGetObject = "GetObject" - -// GetObjectRequest generates a "aws/request.Request" representing the -// client's request for the GetObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObject for more information on using the GetObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectRequest method. -// req, resp := client.GetObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject -func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { - op := &request.Operation{ - Name: opGetObject, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &GetObjectInput{} - } - - output = &GetObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObject API operation for Amazon Simple Storage Service. -// -// Retrieves objects from Amazon S3. To use GET, you must have READ access to -// the object. If you grant READ access to the anonymous user, you can return -// the object without using an authorization header. -// -// An Amazon S3 bucket has no directory hierarchy such as you would find in -// a typical computer file system. You can, however, create a logical hierarchy -// by using object key names that imply a folder structure. For example, instead -// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. -// -// To get an object from such a logical hierarchy, specify the full key name -// for the object in the GET operation. For a virtual hosted-style request example, -// if you have the object photos/2006/February/sample.jpg, specify the resource -// as /photos/2006/February/sample.jpg. For a path-style request example, if -// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, -// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For -// more information about request types, see HTTP Host Header Bucket Specification -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). -// -// To distribute large files to many people, you can save bandwidth costs by -// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). -// For more information about returning the ACL of an object, see GetObjectAcl. -// -// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE -// storage classes, before you can retrieve the object you must first restore -// a copy using . Otherwise, this operation returns an InvalidObjectStateError -// error. For information about restoring archived objects, see Restoring Archived -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). 
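A minimal sketch of the basic GET described above, distinguishing a missing key from other failures (bucket and key names are placeholders):

```go
package s3sketch

import (
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// fetchObject downloads an object and returns its bytes, reporting
// ErrCodeNoSuchKey separately from other errors.
func fetchObject(svc *s3.S3, bucket, key string) ([]byte, error) {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key), // e.g. "photos/2006/February/sample.jpg"
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
			return nil, fmt.Errorf("object %s/%s does not exist", bucket, key)
		}
		return nil, err
	}
	defer out.Body.Close()
	return ioutil.ReadAll(out.Body)
}
```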
-// -// Encryption request headers, like x-amz-server-side-encryption, should not -// be sent for GET requests if your object uses server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, you’ll -// get an HTTP 400 BadRequest error. -// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you GET the object, you must use the following headers: -// -// * x-amz-server-side​-encryption​-customer-algorithm -// -// * x-amz-server-side​-encryption​-customer-key -// -// * x-amz-server-side​-encryption​-customer-key-MD5 -// -// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). -// -// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging -// action), the response also returns the x-amz-tagging-count header that provides -// the count of number of tags associated with the object. You can use GetObjectTagging -// to retrieve the tag set associated with an object. -// -// Permissions -// -// You need the s3:GetObject permission for this operation. For more information, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// If the object you request does not exist, the error Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// -// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will -// return an HTTP status code 404 ("no such key") error. -// -// * If you don’t have the s3:ListBucket permission, Amazon S3 will return -// an HTTP status code 403 ("access denied") error. -// -// Versioning -// -// By default, the GET operation returns the current version of an object. To -// return a different version, use the versionId subresource. -// -// If the current version of the object is a delete marker, Amazon S3 behaves -// as if the object was deleted and includes x-amz-delete-marker: true in the -// response. -// -// For more information about versioning, see PutBucketVersioning. -// -// Overriding Response Header Values -// -// There are times when you want to override certain response header values -// in a GET response. For example, you might override the Content-Disposition -// response header value in your GET request. -// -// You can override values for a set of response headers using the following -// query parameters. These response header values are sent only on a successful -// request, that is, when status code 200 OK is returned. The set of headers -// you can override using these parameters is a subset of the headers that Amazon -// S3 accepts when you create an object. The response headers that you can override -// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, -// Content-Disposition, and Content-Encoding. To override these header values -// in the GET response, you use the following request parameters. -// -// You must sign the request, either using an Authorization header or a presigned -// URL, when using these parameters. They cannot be used with an unsigned (anonymous) -// request. 
-// -// * response-content-type -// -// * response-content-language -// -// * response-expires -// -// * response-cache-control -// -// * response-content-disposition -// -// * response-content-encoding -// -// Additional Considerations about Request Headers -// -// If both of the If-Match and If-Unmodified-Since headers are present in the -// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since -// condition evaluates to false; then, S3 returns 200 OK and the data requested. -// -// If both of the If-None-Match and If-Modified-Since headers are present in -// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since -// condition evaluates to true; then, S3 returns 304 Not Modified response code. -// -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). -// -// The following operations are related to GetObject: -// -// * ListBuckets -// -// * GetObjectAcl -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject -func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - return out, req.Send() -} - -// GetObjectWithContext is the same as GetObject with the addition of -// the ability to pass a context and additional request options. -// -// See GetObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectAcl = "GetObjectAcl" - -// GetObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectAcl for more information on using the GetObjectAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectAclRequest method. 
-// req, resp := client.GetObjectAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl -func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { - op := &request.Operation{ - Name: opGetObjectAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &GetObjectAclInput{} - } - - output = &GetObjectAclOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectAcl API operation for Amazon Simple Storage Service. -// -// Returns the access control list (ACL) of an object. To use this operation, -// you must have READ_ACP access to the object. -// -// Versioning -// -// By default, GET returns ACL information about the current version of an object. -// To return ACL information about a different version, use the versionId subresource. -// -// The following operations are related to GetObjectAcl: -// -// * GetObject -// -// * DeleteObject -// -// * PutObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectAcl for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl -func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - return out, req.Send() -} - -// GetObjectAclWithContext is the same as GetObjectAcl with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectLegalHold = "GetObjectLegalHold" - -// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectLegalHold operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectLegalHold for more information on using the GetObjectLegalHold -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectLegalHoldRequest method. 
-// req, resp := client.GetObjectLegalHoldRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold -func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { - op := &request.Operation{ - Name: opGetObjectLegalHold, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?legal-hold", - } - - if input == nil { - input = &GetObjectLegalHoldInput{} - } - - output = &GetObjectLegalHoldOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectLegalHold API operation for Amazon Simple Storage Service. -// -// Gets an object's current Legal Hold status. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectLegalHold for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold -func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { - req, out := c.GetObjectLegalHoldRequest(input) - return out, req.Send() -} - -// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectLegalHold for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { - req, out := c.GetObjectLegalHoldRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectLockConfiguration = "GetObjectLockConfiguration" - -// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectLockConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectLockConfigurationRequest method. 
-// req, resp := client.GetObjectLockConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration -func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { - op := &request.Operation{ - Name: opGetObjectLockConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?object-lock", - } - - if input == nil { - input = &GetObjectLockConfigurationInput{} - } - - output = &GetObjectLockConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. -// -// Gets the Object Lock configuration for a bucket. The rule specified in the -// Object Lock configuration will be applied by default to every new object -// placed in the specified bucket. For more information, see Locking Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectLockConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration -func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { - req, out := c.GetObjectLockConfigurationRequest(input) - return out, req.Send() -} - -// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectLockConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { - req, out := c.GetObjectLockConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectRetention = "GetObjectRetention" - -// GetObjectRetentionRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectRetention operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectRetention for more information on using the GetObjectRetention -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectRetentionRequest method. 
-// req, resp := client.GetObjectRetentionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention -func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { - op := &request.Operation{ - Name: opGetObjectRetention, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?retention", - } - - if input == nil { - input = &GetObjectRetentionInput{} - } - - output = &GetObjectRetentionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectRetention API operation for Amazon Simple Storage Service. -// -// Retrieves an object's retention settings. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectRetention for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention -func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { - req, out := c.GetObjectRetentionRequest(input) - return out, req.Send() -} - -// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectRetention for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { - req, out := c.GetObjectRetentionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectTagging = "GetObjectTagging" - -// GetObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectTagging for more information on using the GetObjectTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectTaggingRequest method. 
-// req, resp := client.GetObjectTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { - op := &request.Operation{ - Name: opGetObjectTagging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?tagging", - } - - if input == nil { - input = &GetObjectTaggingInput{} - } - - output = &GetObjectTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectTagging API operation for Amazon Simple Storage Service. -// -// Returns the tag-set of an object. You send the GET request against the tagging -// subresource associated with the object. -// -// To use this operation, you must have permission to perform the s3:GetObjectTagging -// action. By default, the GET operation returns information about current version -// of an object. For a versioned bucket, you can have multiple versions of an -// object in your bucket. To retrieve tags of any other version, use the versionId -// query parameter. You also need permission for the s3:GetObjectVersionTagging -// action. -// -// By default, the bucket owner has this permission and can grant this permission -// to others. -// -// For information about the Amazon S3 object tagging feature, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). -// -// The following operation is related to GetObjectTagging: -// -// * PutObjectTagging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) - return out, req.Send() -} - -// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectTorrent = "GetObjectTorrent" - -// GetObjectTorrentRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTorrent operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
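A sketch of reading the tag set for a specific object version, per the versionId note above (bucket, key, and version ID are placeholders supplied by the caller):

```go
package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printObjectTags prints the tag set for one version of an object; pass an
// empty versionID to read the tags on the current version.
func printObjectTags(svc *s3.S3, bucket, key, versionID string) error {
	in := &s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}
	if versionID != "" {
		in.VersionId = aws.String(versionID)
	}
	out, err := svc.GetObjectTagging(in)
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
	return nil
}
```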
-// -// See GetObjectTorrent for more information on using the GetObjectTorrent -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectTorrentRequest method. -// req, resp := client.GetObjectTorrentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { - op := &request.Operation{ - Name: opGetObjectTorrent, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", - } - - if input == nil { - input = &GetObjectTorrentInput{} - } - - output = &GetObjectTorrentOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectTorrent API operation for Amazon Simple Storage Service. -// -// Return torrent files from a bucket. BitTorrent can save you bandwidth when -// you're distributing large files. For more information about BitTorrent, see -// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). -// -// You can get torrent only for objects that are less than 5 GB in size and -// that are not encrypted using server-side encryption with customer-provided -// encryption key. -// -// To use GET, you must have READ access to the object. -// -// The following operation is related to GetObjectTorrent: -// -// * GetObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTorrent for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - return out, req.Send() -} - -// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectTorrent for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetPublicAccessBlock = "GetPublicAccessBlock" - -// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the GetPublicAccessBlock operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPublicAccessBlockRequest method. -// req, resp := client.GetPublicAccessBlockRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock -func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { - op := &request.Operation{ - Name: opGetPublicAccessBlock, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?publicAccessBlock", - } - - if input == nil { - input = &GetPublicAccessBlockInput{} - } - - output = &GetPublicAccessBlockOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPublicAccessBlock API operation for Amazon Simple Storage Service. -// -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To -// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. -// For more information about Amazon S3 permissions, see Specifying Permissions -// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket -// or an object, it checks the PublicAccessBlock configuration for both the -// bucket (or the bucket that contains the object) and the bucket owner's account. -// If the PublicAccessBlock settings are different between the bucket and the -// account, Amazon S3 uses the most restrictive combination of the bucket-level -// and account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). -// -// The following operations are related to GetPublicAccessBlock: -// -// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// * PutPublicAccessBlock -// -// * GetPublicAccessBlock -// -// * DeletePublicAccessBlock -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetPublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock -func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { - req, out := c.GetPublicAccessBlockRequest(input) - return out, req.Send() -} - -// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of -// the ability to pass a context and additional request options. -// -// See GetPublicAccessBlock for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { - req, out := c.GetPublicAccessBlockRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opHeadBucket = "HeadBucket" - -// HeadBucketRequest generates a "aws/request.Request" representing the -// client's request for the HeadBucket operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See HeadBucket for more information on using the HeadBucket -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the HeadBucketRequest method. -// req, resp := client.HeadBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { - op := &request.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &HeadBucketInput{} - } - - output = &HeadBucketOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// HeadBucket API operation for Amazon Simple Storage Service. -// -// This operation is useful to determine if a bucket exists and you have permission -// to access it. The operation returns a 200 OK if the bucket exists and you -// have permission to access it. Otherwise, the operation might return responses -// such as 404 Not Found and 403 Forbidden. -// -// To use this operation, you must have permissions to perform the s3:ListBucket -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadBucket for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - return out, req.Send() -} - -// HeadBucketWithContext is the same as HeadBucket with the addition of -// the ability to pass a context and additional request options. -// -// See HeadBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opHeadObject = "HeadObject" - -// HeadObjectRequest generates a "aws/request.Request" representing the -// client's request for the HeadObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See HeadObject for more information on using the HeadObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the HeadObjectRequest method. -// req, resp := client.HeadObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { - op := &request.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &HeadObjectInput{} - } - - output = &HeadObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// HeadObject API operation for Amazon Simple Storage Service. -// -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. -// -// A HEAD request has the same options as a GET operation on an object. The -// response is identical to the GET response except that there is no response -// body. -// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you retrieve the metadata from the object, you must use the following headers: -// -// * x-amz-server-side-encryption-customer-algorithm -// -// * x-amz-server-side-encryption-customer-key -// -// * x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
-// -// Encryption request headers, like x-amz-server-side-encryption, should not -// be sent for GET requests if your object uses server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, you’ll -// get an HTTP 400 BadRequest error. -// -// Request headers are limited to 8 KB in size. For more information, see Common -// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). -// -// Consider the following when using request headers: -// -// * Consideration 1 – If both of the If-Match and If-Unmodified-Since -// headers are present in the request as follows: If-Match condition evaluates -// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon -// S3 returns 200 OK and the data requested. -// -// * Consideration 2 – If both of the If-None-Match and If-Modified-Since -// headers are present in the request as follows: If-None-Match condition -// evaluates to false, and; If-Modified-Since condition evaluates to true; -// Then Amazon S3 returns the 304 Not Modified response code. -// -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). -// -// Permissions -// -// You need the s3:GetObject permission for this operation. For more information, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// If the object you request does not exist, the error Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// -// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 ("no such key") error. -// -// * If you don’t have the s3:ListBucket permission, Amazon S3 returns -// an HTTP status code 403 ("access denied") error. -// -// The following operation is related to HeadObject: -// -// * GetObject -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses -// for more information on returned errors. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadObject for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - return out, req.Send() -} - -// HeadObjectWithContext is the same as HeadObject with the addition of -// the ability to pass a context and additional request options. -// -// See HeadObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" - -// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. -// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketAnalyticsConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &ListBucketAnalyticsConfigurationsInput{} - } - - output = &ListBucketAnalyticsConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. -// -// Lists the analytics configurations for the bucket. You can have up to 1,000 -// analytics configurations per bucket. -// -// This operation supports list pagination and does not return more than 100 -// configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated -// is set to false. If there are more configurations to list, IsTruncated is -// set to true, and there will be a value in NextContinuationToken. You use -// the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about Amazon S3 analytics feature, see Amazon S3 Analytics -// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
-// -// The following operations are related to ListBucketAnalyticsConfigurations: -// -// * GetBucketAnalyticsConfiguration -// -// * DeleteBucketAnalyticsConfiguration -// -// * PutBucketAnalyticsConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketAnalyticsConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketAnalyticsConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" - -// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketInventoryConfigurations operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 
-// req, resp := client.ListBucketInventoryConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketInventoryConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &ListBucketInventoryConfigurationsInput{} - } - - output = &ListBucketInventoryConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. -// -// Returns a list of inventory configurations for the bucket. You can have up -// to 1,000 analytics configurations per bucket. -// -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// -// The following operations are related to ListBucketInventoryConfigurations: -// -// * GetBucketInventoryConfiguration -// -// * DeleteBucketInventoryConfiguration -// -// * PutBucketInventoryConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketInventoryConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketInventoryConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" - -// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketMetricsConfigurations operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. -// req, resp := client.ListBucketMetricsConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketMetricsConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &ListBucketMetricsConfigurationsInput{} - } - - output = &ListBucketMetricsConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. -// -// Lists the metrics configurations for the bucket. The metrics configurations -// are only for the request metrics of the bucket and do not provide information -// on daily storage metrics. You can have up to 1,000 configurations per bucket. -// -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. 
For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For more information about metrics configurations and CloudWatch request -// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// -// The following operations are related to ListBucketMetricsConfigurations: -// -// * PutBucketMetricsConfiguration -// -// * GetBucketMetricsConfiguration -// -// * DeleteBucketMetricsConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketMetricsConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketMetricsConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBuckets = "ListBuckets" - -// ListBucketsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuckets operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBuckets for more information on using the ListBuckets -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListBucketsRequest method. 
-// req, resp := client.ListBucketsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { - op := &request.Operation{ - Name: opListBuckets, - HTTPMethod: "GET", - HTTPPath: "/", - } - - if input == nil { - input = &ListBucketsInput{} - } - - output = &ListBucketsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBuckets API operation for Amazon Simple Storage Service. -// -// Returns a list of all buckets owned by the authenticated sender of the request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBuckets for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - return out, req.Send() -} - -// ListBucketsWithContext is the same as ListBuckets with the addition of -// the ability to pass a context and additional request options. -// -// See ListBuckets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListMultipartUploads = "ListMultipartUploads" - -// ListMultipartUploadsRequest generates a "aws/request.Request" representing the -// client's request for the ListMultipartUploads operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListMultipartUploads for more information on using the ListMultipartUploads -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListMultipartUploadsRequest method. 
-// req, resp := client.ListMultipartUploadsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { - op := &request.Operation{ - Name: opListMultipartUploads, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?uploads", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "UploadIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, - LimitToken: "MaxUploads", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListMultipartUploadsInput{} - } - - output = &ListMultipartUploadsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListMultipartUploads API operation for Amazon Simple Storage Service. -// -// This operation lists in-progress multipart uploads. An in-progress multipart -// upload is a multipart upload that has been initiated using the Initiate Multipart -// Upload request, but has not yet been completed or aborted. -// -// This operation returns at most 1,000 multipart uploads in the response. 1,000 -// multipart uploads is the maximum number of uploads a response can include, -// which is also the default value. You can further limit the number of uploads -// in a response by specifying the max-uploads parameter in the response. If -// additional multipart uploads satisfy the list criteria, the response will -// contain an IsTruncated element with the value true. To list the additional -// multipart uploads, use the key-marker and upload-id-marker request parameters. -// -// In the response, the uploads are sorted by key. If your application has initiated -// more than one multipart upload using the same object key, then uploads in -// the response are first sorted by key. Additionally, uploads are sorted in -// ascending order within each key by the upload initiation time. -// -// For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). -// -// For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// The following operations are related to ListMultipartUploads: -// -// * CreateMultipartUpload -// -// * UploadPart -// -// * CompleteMultipartUpload -// -// * ListParts -// -// * AbortMultipartUpload -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListMultipartUploads for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - return out, req.Send() -} - -// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of -// the ability to pass a context and additional request options. -// -// See ListMultipartUploads for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListMultipartUploads method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListMultipartUploads operation. -// pageNum := 0 -// err := client.ListMultipartUploadsPages(params, -// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { - return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListMultipartUploadsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListMultipartUploadsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListObjectVersions = "ListObjectVersions" - -// ListObjectVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjectVersions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListObjectVersions for more information on using the ListObjectVersions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListObjectVersionsRequest method. 
-// req, resp := client.ListObjectVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions -func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { - op := &request.Operation{ - Name: opListObjectVersions, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versions", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "VersionIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectVersionsInput{} - } - - output = &ListObjectVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListObjectVersions API operation for Amazon Simple Storage Service. -// -// Returns metadata about all of the versions of objects in a bucket. You can -// also use request parameters as selection criteria to return metadata about -// a subset of all the object versions. -// -// A 200 OK response can contain valid or invalid XML. Make sure to design your -// application to parse the contents of the response and handle it appropriately. -// -// To use this operation, you must have READ access to the bucket. -// -// The following operations are related to ListObjectVersions: -// -// * ListObjectsV2 -// -// * GetObject -// -// * PutObject -// -// * DeleteObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjectVersions for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions -func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - return out, req.Send() -} - -// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjectVersions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjectVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectVersions operation. 
-// pageNum := 0 -// err := client.ListObjectVersionsPages(params, -// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { - return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListObjects = "ListObjects" - -// ListObjectsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjects operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListObjects for more information on using the ListObjects -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListObjectsRequest method. -// req, resp := client.ListObjectsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects -func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { - op := &request.Operation{ - Name: opListObjects, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker || Contents[-1].Key"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectsInput{} - } - - output = &ListObjectsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListObjects API operation for Amazon Simple Storage Service. -// -// Returns some or all (up to 1,000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure -// to design your application to parse the contents of the response and handle -// it appropriately. -// -// This API has been revised. We recommend that you use the newer version, ListObjectsV2, -// when developing applications. 
For backward compatibility, Amazon S3 continues -// to support ListObjects. -// -// The following operations are related to ListObjects: -// -// * ListObjectsV2 -// -// * GetObject -// -// * PutObject -// -// * CreateBucket -// -// * ListBuckets -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjects for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects -func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - return out, req.Send() -} - -// ListObjectsWithContext is the same as ListObjects with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjects for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectsPages iterates over the pages of a ListObjects operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjects method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjects operation. -// pageNum := 0 -// err := client.ListObjectsPages(params, -// func(page *s3.ListObjectsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { - return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectsPagesWithContext same as ListObjectsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListObjectsV2 = "ListObjectsV2" - -// ListObjectsV2Request generates a "aws/request.Request" representing the -// client's request for the ListObjectsV2 operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListObjectsV2 for more information on using the ListObjectsV2 -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListObjectsV2Request method. -// req, resp := client.ListObjectsV2Request(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 -func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { - op := &request.Operation{ - Name: opListObjectsV2, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?list-type=2", - Paginator: &request.Paginator{ - InputTokens: []string{"ContinuationToken"}, - OutputTokens: []string{"NextContinuationToken"}, - LimitToken: "MaxKeys", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListObjectsV2Input{} - } - - output = &ListObjectsV2Output{} - req = c.newRequest(op, input, output) - return -} - -// ListObjectsV2 API operation for Amazon Simple Storage Service. -// -// Returns some or all (up to 1,000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure -// to design your application to parse the contents of the response and handle -// it appropriately. -// -// To use this operation, you must have READ access to the bucket. -// -// To use this operation in an AWS Identity and Access Management (IAM) policy, -// you must have permissions to perform the s3:ListBucket action. The bucket -// owner has this permission by default and can grant this permission to others. -// For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// This section describes the latest revision of the API. We recommend that -// you use this revised API for application development. For backward compatibility, -// Amazon S3 continues to support the prior version of this API, ListObjects. -// -// To get a list of your buckets, see ListBuckets. -// -// The following operations are related to ListObjectsV2: -// -// * GetObject -// -// * PutObject -// -// * CreateBucket -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjectsV2 for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 -func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - return out, req.Send() -} - -// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjectsV2 for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjectsV2 method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectsV2 operation. -// pageNum := 0 -// err := client.ListObjectsV2Pages(params, -// func(page *s3.ListObjectsV2Output, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { - return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectsV2Input - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectsV2Request(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListParts = "ListParts" - -// ListPartsRequest generates a "aws/request.Request" representing the -// client's request for the ListParts operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See ListParts for more information on using the ListParts -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListPartsRequest method. -// req, resp := client.ListPartsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts -func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { - op := &request.Operation{ - Name: opListParts, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - Paginator: &request.Paginator{ - InputTokens: []string{"PartNumberMarker"}, - OutputTokens: []string{"NextPartNumberMarker"}, - LimitToken: "MaxParts", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListPartsInput{} - } - - output = &ListPartsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListParts API operation for Amazon Simple Storage Service. -// -// Lists the parts that have been uploaded for a specific multipart upload. -// This operation must include the upload ID, which you obtain by sending the -// initiate multipart upload request (see CreateMultipartUpload). This request -// returns a maximum of 1,000 uploaded parts. The default number of parts returned -// is 1,000 parts. You can restrict the number of parts returned by specifying -// the max-parts request parameter. If your multipart upload consists of more -// than 1,000 parts, the response returns an IsTruncated field with the value -// of true, and a NextPartNumberMarker element. In subsequent ListParts requests -// you can include the part-number-marker query string parameter and set its -// value to the NextPartNumberMarker field value from the previous response. -// -// For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). -// -// For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// The following operations are related to ListParts: -// -// * CreateMultipartUpload -// -// * UploadPart -// -// * CompleteMultipartUpload -// -// * AbortMultipartUpload -// -// * ListMultipartUploads -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListParts for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts -func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - return out, req.Send() -} - -// ListPartsWithContext is the same as ListParts with the addition of -// the ability to pass a context and additional request options. -// -// See ListParts for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
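The ListParts description above spells out the manual pagination contract: when IsTruncated is true, feed NextPartNumberMarker back as part-number-marker on the next request. A sketch of that loop; the bucket, key, and upload ID are placeholders (the upload ID would normally come from a prior CreateMultipartUpload call):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	in := &s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),    // placeholder
		Key:      aws.String("big-object.bin"),    // placeholder
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"), // placeholder
		MaxParts: aws.Int64(500),
	}
	for {
		out, err := svc.ListParts(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range out.Parts {
			fmt.Printf("part %d  size %d\n",
				aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Resume where the previous page stopped.
		in.PartNumberMarker = out.NextPartNumberMarker
	}
}
```

The ListPartsPages helpers defined further below wrap this same marker handling in a callback, mirroring the ListObjectsV2Pages pattern.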
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListPartsPages iterates over the pages of a ListParts operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListParts method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListParts operation. -// pageNum := 0 -// err := client.ListPartsPages(params, -// func(page *s3.ListPartsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { - return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListPartsPagesWithContext same as ListPartsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListPartsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListPartsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" - -// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. 
-// req, resp := client.PutBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration -func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketAccelerateConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &PutBucketAccelerateConfigurationInput{} - } - - output = &PutBucketAccelerateConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. -// -// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer -// Acceleration is a bucket-level feature that enables you to perform faster -// data transfers to Amazon S3. -// -// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// The Transfer Acceleration state of a bucket can be set to one of the following -// two values: -// -// * Enabled – Enables accelerated data transfers to the bucket. -// -// * Suspended – Disables accelerated data transfers to the bucket. -// -// The GetBucketAccelerateConfiguration operation returns the transfer acceleration -// state of a bucket. -// -// After setting the Transfer Acceleration state of a bucket to Enabled, it -// might take up to thirty minutes before the data transfer rates to the bucket -// increase. -// -// The name of the bucket used for Transfer Acceleration must be DNS-compliant -// and must not contain periods ("."). -// -// For more information about transfer acceleration, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). -// -// The following operations are related to PutBucketAccelerateConfiguration: -// -// * GetBucketAccelerateConfiguration -// -// * CreateBucket -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAccelerateConfiguration for usage and error information. 
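The accelerate configuration described above toggles between Enabled and Suspended. A sketch that enables Transfer Acceleration on a placeholder bucket (per the note above, the bucket name must be DNS-compliant and contain no periods):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Status is either "Enabled" or "Suspended"; the SDK exposes both as constants.
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-accelerated-bucket"), // placeholder, no periods
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// GetBucketAccelerateConfiguration reads the state back.
}
```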
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration -func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAccelerateConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketAcl = "PutBucketAcl" - -// PutBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketAcl for more information on using the PutBucketAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketAclRequest method. -// req, resp := client.PutBucketAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl -func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { - op := &request.Operation{ - Name: opPutBucketAcl, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &PutBucketAclInput{} - } - - output = &PutBucketAclOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAcl API operation for Amazon Simple Storage Service. -// -// Sets the permissions on an existing bucket using access control lists (ACL). -// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// To set the ACL of a bucket, you must have WRITE_ACP permission. -// -// You can use one of the following two ways to set a bucket's permissions: -// -// * Specify the ACL in the request body -// -// * Specify permissions using request headers -// -// You cannot specify access permission using both the body and the request -// headers. -// -// Depending on your application needs, you may choose to set the ACL on a bucket -// using either the request body or the headers. 
For example, if you have an -// existing application that updates a bucket ACL using the request body, then -// you can continue to use that approach. -// -// Access Permissions -// -// You can set access permissions using one of the following methods: -// -// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports -// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -// predefined set of grantees and permissions. Specify the canned ACL name -// as the value of x-amz-acl. If you use this header, you cannot use other -// access control-specific headers in your request. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using -// these headers, you specify explicit access permissions and grantees (AWS -// accounts or Amazon S3 groups) who will receive the permission. If you -// use these ACL-specific headers, you cannot use the x-amz-acl header to -// set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. For more information, see Access Control -// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// You specify each grantee as a type=value pair, where the type is one of -// the following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-write header grants create, -// overwrite, and delete objects permission to LogDelivery group predefined -// by Amazon S3 and two AWS accounts identified by their email addresses. -// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", -// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// Grantee Values -// -// You can specify the person (grantee) to whom you're assigning access rights -// (using request elements) in the following ways: -// -// * By Email address: <>Grantees@email.com<>lt;/Grantee> -// The grantee is resolved to the CanonicalUser and, in a response to a GET -// Object acl request, appears as the CanonicalUser. -// -// * By the person's ID: <>ID<><>GranteesEmail<> -// DisplayName is optional and ignored in the request -// -// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// Related Resources -// -// * CreateBucket -// -// * DeleteBucket -// -// * GetObjectAcl -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAcl for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl -func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { - req, out := c.PutBucketAclRequest(input) - return out, req.Send() -} - -// PutBucketAclWithContext is the same as PutBucketAcl with the addition of -// the ability to pass a context and additional request options. 
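Of the two mutually exclusive styles described above (a canned ACL via the x-amz-acl header, or explicit x-amz-grant-* headers), the canned ACL is the simpler one and maps to the ACL field in the SDK. A minimal sketch with a placeholder bucket name:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Canned ACL: maps to the x-amz-acl header. Do not combine it with the
	// GrantRead/GrantWriteACP/... fields, which map to the explicit
	// x-amz-grant-* headers; the two styles are mutually exclusive.
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ACL:    aws.String(s3.BucketCannedACLPrivate),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```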
-// -// See PutBucketAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { - req, out := c.PutBucketAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" - -// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. -// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration -func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketAnalyticsConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &PutBucketAnalyticsConfigurationInput{} - } - - output = &PutBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). You can have up to 1,000 analytics configurations per -// bucket. -// -// You can choose to have storage class analysis export analysis reports sent -// to a comma-separated values (CSV) flat file. See the DataExport request element. -// Reports are updated daily and are based on the object filters that you configure. -// When selecting data export, you specify a destination bucket and an optional -// destination prefix where the file is written. You can export the data to -// a destination bucket in a different account. However, the destination bucket -// must be in the same Region as the bucket that you are making the PUT analytics -// configuration to. For more information, see Amazon S3 Analytics – Storage -// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
-// -// You must create a bucket policy on the destination bucket where the exported -// file is written to grant permissions to Amazon S3 to write objects to the -// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory -// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). -// -// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// Special Errors -// -// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid -// argument. -// -// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: -// You are attempting to create a new configuration but have already reached -// the 1,000-configuration limit. -// -// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not -// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration -// bucket permission to set the configuration on the bucket. -// -// Related Resources -// -// * -// -// * -// -// * -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAnalyticsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration -func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketCors = "PutBucketCors" - -// PutBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketCors operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketCors for more information on using the PutBucketCors -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketCorsRequest method. -// req, resp := client.PutBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors -func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { - op := &request.Operation{ - Name: opPutBucketCors, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &PutBucketCorsInput{} - } - - output = &PutBucketCorsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketCors API operation for Amazon Simple Storage Service. -// -// Sets the cors configuration for your bucket. If the configuration exists, -// Amazon S3 replaces it. -// -// To use this operation, you must be allowed to perform the s3:PutBucketCORS -// action. By default, the bucket owner has this permission and can grant it -// to others. -// -// You set this configuration on a bucket so that the bucket can service cross-origin -// requests. For example, you might want to enable a request whose origin is -// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com -// by using the browser's XMLHttpRequest capability. -// -// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors -// subresource to the bucket. The cors subresource is an XML document in which -// you configure rules that identify origins and the HTTP methods that can be -// executed on your bucket. The document is limited to 64 KB in size. -// -// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) -// against a bucket, it evaluates the cors configuration on the bucket and uses -// the first CORSRule rule that matches the incoming browser request to enable -// a cross-origin request. For a rule to match, the following conditions must -// be met: -// -// * The request's Origin header must match AllowedOrigin elements. -// -// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method -// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod -// elements. -// -// * Every header specified in the Access-Control-Request-Headers request -// header of a pre-flight request must match an AllowedHeader element. -// -// For more information about CORS, go to Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. -// -// Related Resources -// -// * GetBucketCors -// -// * DeleteBucketCors -// -// * RESTOPTIONSobject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketCors for usage and error information. 
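The CORS matching rules above (origin, method, and request headers must all match a CORSRule) are expressed in the SDK as CORSRule values inside a CORSConfiguration. A sketch that allows browser GET and PUT requests from one origin; the bucket and origin are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// One rule: a cross-origin (or pre-flight OPTIONS) request is allowed only
	// if its Origin, method, and requested headers all match the rule.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("http://www.example.com")},
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```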
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors -func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { - req, out := c.PutBucketCorsRequest(input) - return out, req.Send() -} - -// PutBucketCorsWithContext is the same as PutBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketCors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { - req, out := c.PutBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketEncryption = "PutBucketEncryption" - -// PutBucketEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketEncryption for more information on using the PutBucketEncryption -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketEncryptionRequest method. -// req, resp := client.PutBucketEncryptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption -func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { - op := &request.Operation{ - Name: opPutBucketEncryption, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?encryption", - } - - if input == nil { - input = &PutBucketEncryptionInput{} - } - - output = &PutBucketEncryptionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketEncryption API operation for Amazon Simple Storage Service. -// -// This implementation of the PUT operation uses the encryption subresource -// to set the default encryption state of an existing bucket. -// -// This implementation of the PUT operation sets default encryption for a bucket -// using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS -// customer master keys (CMKs) (SSE-KMS). -// -// This operation requires AWS Signature Version 4. For more information, see -// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). -// -// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. 
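The default-encryption description above covers SSE-S3 (Amazon S3-managed keys) and SSE-KMS. A sketch that sets SSE-S3 as the bucket default; the bucket name is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// SSE-S3 default: new objects are encrypted with AES-256 unless the PUT
	// request says otherwise. For SSE-KMS, set SSEAlgorithm to "aws:kms" and
	// add KMSMasterKeyID instead.
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```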
For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Related Resources -// -// * GetBucketEncryption -// -// * DeleteBucketEncryption -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketEncryption for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption -func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { - req, out := c.PutBucketEncryptionRequest(input) - return out, req.Send() -} - -// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketEncryption for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { - req, out := c.PutBucketEncryptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" - -// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketInventoryConfigurationRequest method. 
-// req, resp := client.PutBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketInventoryConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &PutBucketInventoryConfigurationInput{} - } - - output = &PutBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// This implementation of the PUT operation adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. -// -// Amazon S3 inventory generates inventories of the objects in the bucket on -// a daily or weekly basis, and the results are published to a flat file. The -// bucket that is inventoried is called the source bucket, and the bucket where -// the inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same AWS Region as the source bucket. -// -// When you configure an inventory for a source bucket, you specify the destination -// bucket where you want the inventory to be stored, and whether to generate -// the inventory daily or weekly. You can also configure what object metadata -// to include and whether to inventory all object versions or only current versions. -// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev//storage-inventory.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// You must create a bucket policy on the destination bucket to grant permissions -// to Amazon S3 to write objects to the bucket in the defined location. For -// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage -// Class Analysis. (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// -// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Special Errors -// -// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument -// -// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. 
-// -// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket -// -// Related Resources -// -// * GetBucketInventoryConfiguration -// -// * DeleteBucketInventoryConfiguration -// -// * ListBucketInventoryConfigurations -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketInventoryConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLifecycle = "PutBucketLifecycle" - -// PutBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketLifecycle for more information on using the PutBucketLifecycle -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketLifecycleRequest method. 
-// req, resp := client.PutBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -// -// Deprecated: PutBucketLifecycle has been deprecated -func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") - } - op := &request.Operation{ - Name: opPutBucketLifecycle, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &PutBucketLifecycleInput{} - } - - output = &PutBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketLifecycle API operation for Amazon Simple Storage Service. -// -// -// For an updated version of this API, see PutBucketLifecycleConfiguration. -// This version has been deprecated. Existing lifecycle configurations will -// work. For new lifecycle configurations, use the updated API. -// -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. For information about lifecycle configuration, see -// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev//object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// By default, all Amazon S3 resources, including buckets, objects, and related -// subresources (for example, lifecycle configuration and website configuration) -// are private. Only the resource owner, the AWS account that created the resource, -// can access it. The resource owner can optionally grant access permissions -// to others by writing an access policy. For this operation, users must get -// the s3:PutLifecycleConfiguration permission. -// -// You can also explicitly deny permissions. Explicit denial also supersedes -// any other permissions. If you want to prevent users or accounts from removing -// or deleting objects from your bucket, you must deny them permissions for -// the following actions: -// -// * s3:DeleteObject -// -// * s3:DeleteObjectVersion -// -// * s3:PutLifecycleConfiguration -// -// For more information about permissions, see Managing Access Permissions to -// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// For more examples of transitioning objects to storage classes such as STANDARD_IA -// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev//intro-lifecycle-rules.html#lifecycle-configuration-examples). -// -// Related Resources -// -// * GetBucketLifecycle(Deprecated) -// -// * GetBucketLifecycleConfiguration -// -// * -// -// * By default, a resource owner—in this case, a bucket owner, which is -// the AWS account that created the bucket—can perform any of the operations. -// A resource owner can also grant others permission to perform the operation. 
-// For more information, see the following topics in the Amazon Simple Storage -// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html) -// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycle for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -// -// Deprecated: PutBucketLifecycle has been deprecated -func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - return out, req.Send() -} - -// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -// -// Deprecated: PutBucketLifecycleWithContext has been deprecated -func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" - -// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. 
-// req, resp := client.PutBucketLifecycleConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketLifecycleConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &PutBucketLifecycleConfigurationInput{} - } - - output = &PutBucketLifecycleConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. -// -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. For information about lifecycle configuration, see -// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The previous version -// of the API supported filtering based only on an object key name prefix, which -// is supported for backward compatibility. For the related API description, -// see PutBucketLifecycle. -// -// Rules -// -// You specify the lifecycle configuration in your request body. The lifecycle -// configuration is specified as XML consisting of one or more rules. Each rule -// consists of the following: -// -// * Filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, or a combination -// of both. -// -// * Status whether the rule is in effect. -// -// * One or more lifecycle transition and expiration actions that you want -// Amazon S3 to perform on the objects identified by the filter. If the state -// of your bucket is versioning-enabled or versioning-suspended, you can -// have many versions of the same object (one current version and zero or -// more noncurrent versions). Amazon S3 provides predefined actions that -// you can specify for current and noncurrent object versions. -// -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). -// -// Permissions -// -// By default, all Amazon S3 resources are private, including buckets, objects, -// and related subresources (for example, lifecycle configuration and website -// configuration). Only the resource owner (that is, the AWS account that created -// it) can access the resource. The resource owner can optionally grant access -// permissions to others by writing an access policy. For this operation, a -// user must get the s3:PutLifecycleConfiguration permission. -// -// You can also explicitly deny permissions. Explicit deny also supersedes any -// other permissions. 
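Each lifecycle rule described above (a Filter, a Status, and one or more transition or expiration actions) maps directly onto an s3.LifecycleRule. A sketch with one rule that moves a placeholder key prefix to STANDARD_IA after 30 days and expires it after a year; the bucket, rule ID, and prefix are all illustrative:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("archive-then-expire-logs"), // placeholder
				Status: aws.String("Enabled"),
				// Filter: a key prefix here; object tags, or an And of both, also work.
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(30),
					StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
				}},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```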
If you want to block users or accounts from removing or -// deleting objects from your bucket, you must deny them permissions for the -// following actions: -// -// * s3:DeleteObject -// -// * s3:DeleteObjectVersion -// -// * s3:PutLifecycleConfiguration -// -// For more information about permissions, see Managing Access Permissions to -// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// The following are related to PutBucketLifecycleConfiguration: -// -// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) -// -// * GetBucketLifecycleConfiguration -// -// * DeleteBucketLifecycle -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycleConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLifecycleConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLogging = "PutBucketLogging" - -// PutBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLogging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketLogging for more information on using the PutBucketLogging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketLoggingRequest method. 
-// req, resp := client.PutBucketLoggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { - op := &request.Operation{ - Name: opPutBucketLogging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?logging", - } - - if input == nil { - input = &PutBucketLoggingInput{} - } - - output = &PutBucketLoggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketLogging API operation for Amazon Simple Storage Service. -// -// Set the logging parameters for a bucket and to specify permissions for who -// can view and modify the logging parameters. All logs are saved to buckets -// in the same AWS Region as the source bucket. To set the logging status of -// a bucket, you must be the bucket owner. -// -// The bucket owner is automatically granted FULL_CONTROL to all logs. You use -// the Grantee request element to grant access to other people. The Permissions -// request element specifies the kind of access the grantee has to the logs. -// -// Grantee Values -// -// You can specify the person (grantee) to whom you're assigning access rights -// (using request elements) in the following ways: -// -// * By the person's ID: <>ID<><>GranteesEmail<> -// DisplayName is optional and ignored in the request. -// -// * By Email address: <>Grantees@email.com<> -// The grantee is resolved to the CanonicalUser and, in a response to a GET -// Object acl request, appears as the CanonicalUser. -// -// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// To enable logging, you use LoggingEnabled and its children request elements. -// To disable logging, you use an empty BucketLoggingStatus request element: -// -// -// -// For more information about server access logging, see Server Access Logging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). -// -// For more information about creating a bucket, see CreateBucket. For more -// information about returning the logging status of a bucket, see GetBucketLogging. -// -// The following operations are related to PutBucketLogging: -// -// * PutObject -// -// * DeleteBucket -// -// * CreateBucket -// -// * GetBucketLogging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLogging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - return out, req.Send() -} - -// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLogging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" - -// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketMetricsConfigurationRequest method. -// req, resp := client.PutBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration -func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketMetricsConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &PutBucketMetricsConfigurationInput{} - } - - output = &PutBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Sets a metrics configuration (specified by the metrics configuration ID) -// for the bucket. You can have up to 1,000 metrics configurations per bucket. -// If you're updating an existing metrics configuration, note that this is a -// full replacement of the existing metrics configuration. If you don't include -// the elements you want to keep, they are erased. -// -// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
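As noted above, a metrics configuration is identified by its ID and is replaced wholesale on each PUT, so every element to keep must be resent. A sketch that creates a bucket-wide request-metrics configuration; the bucket name and configuration ID are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// No Filter means the configuration covers every object in the bucket.
	// The Id in the input and in the configuration body must match.
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket:               aws.String("example-bucket"), // placeholder
		Id:                   aws.String("EntireBucket"),   // placeholder
		MetricsConfiguration: &s3.MetricsConfiguration{Id: aws.String("EntireBucket")},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```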
-// -// The following operations are related to PutBucketMetricsConfiguration: -// -// * DeleteBucketMetricsConfiguration -// -// * PutBucketMetricsConfiguration -// -// * ListBucketMetricsConfigurations -// -// GetBucketLifecycle has the following special error: -// -// * Error code: TooManyConfigurations Description: You are attempting to -// create a new configuration but have already reached the 1,000-configuration -// limit. HTTP Status Code: HTTP 400 Bad Request -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketMetricsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration -func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { - req, out := c.PutBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketMetricsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { - req, out := c.PutBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketNotification = "PutBucketNotification" - -// PutBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotification operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketNotification for more information on using the PutBucketNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketNotificationRequest method. 
-// req, resp := client.PutBucketNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification -// -// Deprecated: PutBucketNotification has been deprecated -func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") - } - op := &request.Operation{ - Name: opPutBucketNotification, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &PutBucketNotificationInput{} - } - - output = &PutBucketNotificationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketNotification API operation for Amazon Simple Storage Service. -// -// No longer used, see the PutBucketNotificationConfiguration operation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketNotification for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification -// -// Deprecated: PutBucketNotification has been deprecated -func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - return out, req.Send() -} - -// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -// -// Deprecated: PutBucketNotificationWithContext has been deprecated -func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" - -// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotificationConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the PutBucketNotificationConfigurationRequest method. -// req, resp := client.PutBucketNotificationConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration -func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketNotificationConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &PutBucketNotificationConfigurationInput{} - } - - output = &PutBucketNotificationConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. -// -// Enables notifications of specified events for a bucket. For more information -// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). -// -// Using this API, you can replace an existing notification configuration. The -// configuration is an XML file that defines the event types that you want Amazon -// S3 to publish and the destination where you want Amazon S3 to publish an -// event notification when it detects an event of the specified type. -// -// By default, your bucket has no event notifications configured. That is, the -// notification configuration will be an empty NotificationConfiguration. -// -// -// -// -// -// This operation replaces the existing notification configuration with the -// configuration you include in the request body. -// -// After Amazon S3 receives this request, it first verifies that any Amazon -// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon -// SQS) destination exists, and that the bucket owner has permission to publish -// to it by sending a test notification. In the case of AWS Lambda destinations, -// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission -// to invoke the function from the Amazon S3 bucket. For more information, see -// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). -// -// You can disable notifications by adding the empty NotificationConfiguration -// element. -// -// By default, only the bucket owner can configure notifications on a bucket. -// However, bucket owners can use a bucket policy to grant permission to other -// users to set this configuration with s3:PutBucketNotification permission. -// -// The PUT notification is an atomic operation. For example, suppose your notification -// configuration includes SNS topic, SQS queue, and Lambda function configurations. -// When you send a PUT request with this configuration, Amazon S3 sends test -// messages to your SNS topic. If the message fails, the entire PUT operation -// will fail, and Amazon S3 will not add the configuration to your bucket. 
-// -// Responses -// -// If the configuration in the request body includes only one TopicConfiguration -// specifying only the s3:ReducedRedundancyLostObject event type, the response -// will also include the x-amz-sns-test-message-id header containing the message -// ID of the test notification sent to the topic. -// -// The following operation is related to PutBucketNotificationConfiguration: -// -// * GetBucketNotificationConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketNotificationConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration -func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketNotificationConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketPolicy = "PutBucketPolicy" - -// PutBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketPolicy for more information on using the PutBucketPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketPolicyRequest method. 
-// req, resp := client.PutBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { - op := &request.Operation{ - Name: opPutBucketPolicy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &PutBucketPolicyInput{} - } - - output = &PutBucketPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketPolicy API operation for Amazon Simple Storage Service. -// -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using -// an identity other than the root user of the AWS account that owns the bucket, -// the calling identity must have the PutBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account in order to use this operation. -// -// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a -// 405 Method Not Allowed error. -// -// As a security precaution, the root user of the AWS account that owns a bucket -// can always use this operation, even if the policy explicitly denies the root -// user the ability to perform this action. -// -// For more information about bucket policies, see Using Bucket Policies and -// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). -// -// The following operations are related to PutBucketPolicy: -// -// * CreateBucket -// -// * DeleteBucket -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketPolicy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - return out, req.Send() -} - -// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketReplication = "PutBucketReplication" - -// PutBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketReplication operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketReplication for more information on using the PutBucketReplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketReplicationRequest method. -// req, resp := client.PutBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { - op := &request.Operation{ - Name: opPutBucketReplication, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &PutBucketReplicationInput{} - } - - output = &PutBucketReplicationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketReplication API operation for Amazon Simple Storage Service. -// -// Creates a replication configuration or replaces an existing one. For more -// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 Developer Guide. -// -// To perform this operation, the user or role performing the operation must -// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. -// -// Specify the replication configuration in the request body. In the replication -// configuration, you provide the name of the destination bucket where you want -// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to -// replicate objects on your behalf, and other relevant information. -// -// A replication configuration must include at least one rule, and can contain -// a maximum of 1,000. Each rule identifies a subset of objects to replicate -// by filtering the objects in the source bucket. To choose additional subsets -// of objects to replicate, add a rule for each subset. All rules must specify -// the same destination bucket. -// -// To specify a subset of the objects in the source bucket to apply a replication -// rule to, add the Filter element as a child of the Rule element. You can filter -// objects based on an object key prefix, one or more object tags, or both. -// When you add the Filter element in the configuration, you must also add the -// following elements: DeleteMarkerReplication, Status, and Priority. -// -// For information about enabling versioning on a bucket, see Using Versioning -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). -// -// By default, a resource owner, in this case the AWS account that created the -// bucket, can perform this operation. The resource owner can also grant others -// permissions to perform the operation. 
For more information about permissions, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// Handling Replication of Encrypted Objects -// -// By default, Amazon S3 doesn't replicate objects that are stored at rest using -// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted -// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, -// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about -// replication configuration, see Replicating Objects Created with SSE Using -// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). -// -// For information on PutBucketReplication errors, see ReplicationErrorCodeList -// -// The following operations are related to PutBucketReplication: -// -// * GetBucketReplication -// -// * DeleteBucketReplication -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketReplication for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - return out, req.Send() -} - -// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketReplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketRequestPayment = "PutBucketRequestPayment" - -// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketRequestPayment operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketRequestPaymentRequest method. 
-// req, resp := client.PutBucketRequestPaymentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment -func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { - op := &request.Operation{ - Name: opPutBucketRequestPayment, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &PutBucketRequestPaymentInput{} - } - - output = &PutBucketRequestPaymentOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketRequestPayment API operation for Amazon Simple Storage Service. -// -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. For more information, see Requester Pays -// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). -// -// The following operations are related to PutBucketRequestPayment: -// -// * CreateBucket -// -// * GetBucketRequestPayment -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketRequestPayment for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment -func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - return out, req.Send() -} - -// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketRequestPayment for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketTagging = "PutBucketTagging" - -// PutBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketTagging for more information on using the PutBucketTagging -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketTaggingRequest method. -// req, resp := client.PutBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { - op := &request.Operation{ - Name: opPutBucketTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &PutBucketTaggingInput{} - } - - output = &PutBucketTaggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketTagging API operation for Amazon Simple Storage Service. -// -// Sets the tags for a bucket. -// -// Use tags to organize your AWS bill to reflect your own cost structure. To -// do this, sign up to get your AWS account bill with tag key values included. -// Then, to see the cost of combined resources, organize your billing information -// according to resources with the same tag key values. For example, you can -// tag several resources with a specific application name, and then organize -// your billing information to see the total cost of that application across -// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). -// -// Within a bucket, if you add a tag that has the same key as an existing tag, -// the new value overwrites the old value. For more information, see Using Cost -// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). -// -// To use this operation, you must have permissions to perform the s3:PutBucketTagging -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). -// -// PutBucketTagging has the following special errors: -// -// * Error code: InvalidTagError Description: The tag provided was not a -// valid tag. This error can occur if the tag did not pass input validation. -// For information about tag restrictions, see User-Defined Tag Restrictions -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//allocation-tag-restrictions.html) -// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//aws-tag-restrictions.html). -// -// * Error code: MalformedXMLError Description: The XML provided does not -// match the schema. -// -// * Error code: OperationAbortedError Description: A conflicting conditional -// operation is currently in progress against this resource. Please try again. -// -// * Error code: InternalError Description: The service was unable to apply -// the provided tag to the bucket. 
-// -// The following operations are related to PutBucketTagging: -// -// * GetBucketTagging -// -// * DeleteBucketTagging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - return out, req.Send() -} - -// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketVersioning = "PutBucketVersioning" - -// PutBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketVersioning operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketVersioning for more information on using the PutBucketVersioning -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketVersioningRequest method. -// req, resp := client.PutBucketVersioningRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { - op := &request.Operation{ - Name: opPutBucketVersioning, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &PutBucketVersioningInput{} - } - - output = &PutBucketVersioningOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketVersioning API operation for Amazon Simple Storage Service. -// -// Sets the versioning state of an existing bucket. To set the versioning state, -// you must be the bucket owner. -// -// You can set the versioning state with one of the following values: -// -// Enabled—Enables versioning for the objects in the bucket. All objects added -// to the bucket receive a unique version ID. 
-// -// Suspended—Disables versioning for the objects in the bucket. All objects -// added to the bucket receive the version ID null. -// -// If the versioning state has never been set on a bucket, it has no versioning -// state; a GetBucketVersioning request does not return a versioning state value. -// -// If the bucket owner enables MFA Delete in the bucket versioning configuration, -// the bucket owner must include the x-amz-mfa request header and the Status -// and the MfaDelete request elements in a request to set the versioning state -// of the bucket. -// -// If you have an object expiration lifecycle policy in your non-versioned bucket -// and you want to maintain the same permanent delete behavior when you enable -// versioning, you must add a noncurrent expiration policy. The noncurrent expiration -// lifecycle policy will manage the deletes of the noncurrent object versions -// in the version-enabled bucket. (A version-enabled bucket maintains one current -// and zero or more noncurrent object versions.) For more information, see Lifecycle -// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). -// -// Related Resources -// -// * CreateBucket -// -// * DeleteBucket -// -// * GetBucketVersioning -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketVersioning for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - return out, req.Send() -} - -// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketVersioning for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketWebsite = "PutBucketWebsite" - -// PutBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketWebsite operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketWebsite for more information on using the PutBucketWebsite -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutBucketWebsiteRequest method. 
-// req, resp := client.PutBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { - op := &request.Operation{ - Name: opPutBucketWebsite, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &PutBucketWebsiteInput{} - } - - output = &PutBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketWebsite API operation for Amazon Simple Storage Service. -// -// Sets the configuration of the website that is specified in the website subresource. -// To configure a bucket as a website, you can add this subresource on the bucket -// with website configuration information such as the file name of the index -// document and any redirect rules. For more information, see Hosting Websites -// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). -// -// This PUT operation requires the S3:PutBucketWebsite permission. By default, -// only the bucket owner can configure the website attached to a bucket; however, -// bucket owners can allow other users to set the website configuration by writing -// a bucket policy that grants them the S3:PutBucketWebsite permission. -// -// To redirect all website requests sent to the bucket's website endpoint, you -// add a website configuration with the following elements. Because all requests -// are sent to another website, you don't need to provide index document name -// for the bucket. -// -// * WebsiteConfiguration -// -// * RedirectAllRequestsTo -// -// * HostName -// -// * Protocol -// -// If you want granular control over redirects, you can use the following elements -// to add routing rules that describe conditions for redirecting requests and -// information about the redirect destination. In this case, the website configuration -// must provide an index document for the bucket, because some requests might -// not be redirected. -// -// * WebsiteConfiguration -// -// * IndexDocument -// -// * Suffix -// -// * ErrorDocument -// -// * Key -// -// * RoutingRules -// -// * RoutingRule -// -// * Condition -// -// * HttpErrorCodeReturnedEquals -// -// * KeyPrefixEquals -// -// * Redirect -// -// * Protocol -// -// * HostName -// -// * ReplaceKeyPrefixWith -// -// * ReplaceKeyWith -// -// * HttpRedirectCode -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketWebsite for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - return out, req.Send() -} - -// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObject = "PutObject" - -// PutObjectRequest generates a "aws/request.Request" representing the -// client's request for the PutObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObject for more information on using the PutObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutObjectRequest method. -// req, resp := client.PutObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { - op := &request.Operation{ - Name: opPutObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &PutObjectInput{} - } - - output = &PutObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObject API operation for Amazon Simple Storage Service. -// -// Adds an object to a bucket. You must have WRITE permissions on a bucket to -// add an object to it. -// -// Amazon S3 never adds partial objects; if you receive a success response, -// Amazon S3 added the entire object to the bucket. -// -// Amazon S3 is a distributed system. If it receives multiple write requests -// for the same object simultaneously, it overwrites all but the last object -// written. Amazon S3 does not provide object locking; if you need this, make -// sure to build it into your application layer or use versioning instead. -// -// To ensure that data is not corrupted traversing the network, use the Content-MD5 -// header. When you use this header, Amazon S3 checks the object against the -// provided MD5 value and, if they do not match, returns an error. Additionally, -// you can calculate the MD5 while putting an object to Amazon S3 and compare -// the returned ETag to the calculated MD5 value. -// -// To configure your application to send the request headers before sending -// the request body, use the 100-continue HTTP status code. For PUT operations, -// this helps you avoid sending the message body if the message is rejected -// based on the headers (for example, because authentication fails or a redirect -// occurs). For more information on the 100-continue HTTP status code, see Section -// 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt (http://www.ietf.org/rfc/rfc2616.txt). -// -// You can optionally request server-side encryption. With server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts the data when you access it. 
You have the option to provide -// your own encryption key or use AWS managed encryption keys. For more information, -// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). -// -// Access Permissions -// -// You can optionally specify the accounts or groups that should be granted -// specific permissions on the new object. There are two ways to grant the permissions -// using the request headers: -// -// * Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters -// map to the set of permissions that Amazon S3 supports in an ACL. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// Server-Side- Encryption-Specific Request Headers -// -// You can optionally tell Amazon S3 to encrypt data at rest using server-side -// encryption. Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. The option you use depends on whether you want to -// use AWS managed encryption keys or provide your own encryption key. -// -// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) -// stored in AWS Key Management Service (AWS KMS) – If you want AWS to -// manage the keys used to encrypt data, specify the following headers in -// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id -// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, -// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon -// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want -// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id -// of the symmetric customer managed CMK. Amazon S3 only supports symmetric -// CMKs and not asymmetric CMKs. For more information, see Using Symmetric -// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the AWS Key Management Service Developer Guide. All GET and PUT requests -// for an object protected by AWS KMS fail if you don't make them with SSL -// or by using SigV4. For more information about server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with CMKs stored in AWS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// * Use customer-provided encryption keys – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key -// x-amz-server-side​-encryption​-customer-key-MD5 For more information -// about server-side encryption with CMKs stored in KMS (SSE-KMS), see Protecting -// Data Using Server-Side Encryption with CMKs stored in AWS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). 
-// -// Access-Control-List (ACL)-Specific Request Headers -// -// You also can use the following access control–related headers with this -// operation. By default, all objects are private. Only the owner has full access -// control. When adding a new object, you can grant permissions to individual -// AWS accounts or to predefined groups defined by Amazon S3. These permissions -// are then added to the Access Control List (ACL) on the object. For more information, -// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// With this operation, you can grant access permissions using one of the following -// two methods: -// -// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees -// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly — To explicitly grant access -// permissions to specific AWS accounts or groups, use the following headers. -// Each header maps to specific permissions that Amazon S3 supports in an -// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// In the header, you specify a list of grantees who get the specific permission. -// To grant permissions explicitly use: x-amz-grant-read x-amz-grant-write -// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -// specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account Using email addresses to specify a grantee is only supported -// in the following AWS Regions: US East (N. Virginia) US West (N. California) -// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific -// (Tokyo) EU (Ireland) South America (São Paulo) For a list of all the -// Amazon S3 supported Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the AWS General Reference id – if the value specified is the canonical -// user ID of an AWS account uri – if you are granting permissions to a -// predefined group For example, the following x-amz-grant-read header grants -// the AWS accounts identified by email addresses permissions to read object -// data and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", -// emailAddress="abc@amazon.com" -// -// Server-Side- Encryption-Specific Request Headers -// -// You can optionally tell Amazon S3 to encrypt data at rest using server-side -// encryption. Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. The option you use depends on whether you want to -// use AWS-managed encryption keys or provide your own encryption key. -// -// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) -// stored in AWS Key Management Service (AWS KMS) – If you want AWS to -// manage the keys used to encrypt data, specify the following headers in -// the request. 
x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id -// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, -// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon -// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want -// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id -// of the symmetric customer managed CMK. Amazon S3 only supports symmetric -// CMKs and not asymmetric CMKs. For more information, see Using Symmetric -// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the AWS Key Management Service Developer Guide. All GET and PUT requests -// for an object protected by AWS KMS fail if you don't make them with SSL -// or by using SigV4. For more information about server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// * Use customer-provided encryption keys – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// If you use this feature, the ETag value that Amazon S3 returns in the -// response is not the MD5 of the object. x-amz-server-side​-encryption​-customer-algorithm -// x-amz-server-side​-encryption​-customer-key x-amz-server-side​-encryption​-customer-key-MD5 -// For more information about server-side encryption with CMKs stored in -// AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with -// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// Storage Class Options -// -// By default, Amazon S3 uses the Standard storage class to store newly created -// objects. The Standard storage class provides high durability and high availability. -// You can specify other storage classes depending on the performance needs. -// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Versioning -// -// If you enable versioning for a bucket, Amazon S3 automatically generates -// a unique version ID for the object being stored. Amazon S3 returns this ID -// in the response using the x-amz-version-id response header. If versioning -// is suspended, Amazon S3 always uses null as the version ID for the object -// stored. For more information about returning the versioning state of a bucket, -// see GetBucketVersioning. If you enable versioning for a bucket, when Amazon -// S3 receives multiple write requests for the same object simultaneously, it -// stores all of the objects. -// -// Related Resources -// -// * CopyObject -// -// * DeleteObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObject for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - return out, req.Send() -} - -// PutObjectWithContext is the same as PutObject with the addition of -// the ability to pass a context and additional request options. -// -// See PutObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectAcl = "PutObjectAcl" - -// PutObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectAcl for more information on using the PutObjectAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutObjectAclRequest method. -// req, resp := client.PutObjectAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl -func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { - op := &request.Operation{ - Name: opPutObjectAcl, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &PutObjectAclInput{} - } - - output = &PutObjectAclOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObjectAcl API operation for Amazon Simple Storage Service. -// -// Uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket. You must have WRITE_ACP permission -// to set the ACL of an object. -// -// Depending on your application needs, you can choose to set the ACL on an -// object using either the request body or the headers. For example, if you -// have an existing application that updates a bucket ACL using the request -// body, you can continue to use that approach. -// -// Access Permissions -// -// You can set access permissions using one of the following methods: -// -// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports -// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -// predefined set of grantees and permissions. Specify the canned ACL name -// as the value of x-amz-acl. If you use this header, you cannot use other -// access control-specific headers in your request. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). 
-// -// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using -// these headers, you specify explicit access permissions and grantees (AWS -// accounts or Amazon S3 groups) who will receive the permission. If you -// use these ACL-specific headers, you cannot use x-amz-acl header to set -// a canned ACL. These parameters map to the set of permissions that Amazon -// S3 supports in an ACL. For more information, see Access Control List (ACL) -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// You specify each grantee as a type=value pair, where the type is one of -// the following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants list objects -// permission to the two AWS accounts identified by their email addresses. -// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// Grantee Values -// -// You can specify the person (grantee) to whom you're assigning access rights -// (using request elements) in the following ways: -// -// * By Email address: <>Grantees@email.com<>lt;/Grantee> -// The grantee is resolved to the CanonicalUser and, in a response to a GET -// Object acl request, appears as the CanonicalUser. -// -// * By the person's ID: <>ID<><>GranteesEmail<> -// DisplayName is optional and ignored in the request. -// -// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// Versioning -// -// The ACL of an object is set at the object version level. By default, PUT -// sets the ACL of the current version of an object. To set the ACL of a different -// version, use the versionId subresource. -// -// Related Resources -// -// * CopyObject -// -// * GetObject -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectAcl for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl -func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { - req, out := c.PutObjectAclRequest(input) - return out, req.Send() -} - -// PutObjectAclWithContext is the same as PutObjectAcl with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { - req, out := c.PutObjectAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opPutObjectLegalHold = "PutObjectLegalHold" - -// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectLegalHold operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectLegalHold for more information on using the PutObjectLegalHold -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutObjectLegalHoldRequest method. -// req, resp := client.PutObjectLegalHoldRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold -func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { - op := &request.Operation{ - Name: opPutObjectLegalHold, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?legal-hold", - } - - if input == nil { - input = &PutObjectLegalHoldInput{} - } - - output = &PutObjectLegalHoldOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObjectLegalHold API operation for Amazon Simple Storage Service. -// -// Applies a Legal Hold configuration to the specified object. -// -// Related Resources -// -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectLegalHold for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold -func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { - req, out := c.PutObjectLegalHoldRequest(input) - return out, req.Send() -} - -// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectLegalHold for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { - req, out := c.PutObjectLegalHoldRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectLockConfiguration = "PutObjectLockConfiguration" - -// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectLockConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
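//
// A small, illustrative sketch (not part of the vendored source) of the PutObjectAcl
// operation documented above, setting a canned ACL; svc is assumed to be an *s3.S3
// client created elsewhere, and the bucket and key are placeholders.
//
//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
//        Bucket: aws.String("examplebucket"),
//        Key:    aws.String("exampleobject"),
//        ACL:    aws.String(s3.ObjectCannedACLPublicRead), // sent as the x-amz-acl header
//    })
//    if err != nil {
//        // NoSuchKey is reported through the returned awserr.Error's Code
//    }
//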
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutObjectLockConfigurationRequest method. -// req, resp := client.PutObjectLockConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration -func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { - op := &request.Operation{ - Name: opPutObjectLockConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?object-lock", - } - - if input == nil { - input = &PutObjectLockConfigurationInput{} - } - - output = &PutObjectLockConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. -// -// Places an Object Lock configuration on the specified bucket. The rule specified -// in the Object Lock configuration will be applied by default to every new -// object placed in the specified bucket. -// -// DefaultRetention requires either Days or Years. You can't specify both at -// the same time. -// -// Related Resources -// -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectLockConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration -func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { - req, out := c.PutObjectLockConfigurationRequest(input) - return out, req.Send() -} - -// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectLockConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { - req, out := c.PutObjectLockConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectRetention = "PutObjectRetention" - -// PutObjectRetentionRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectRetention operation. 
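//
// An illustrative sketch (not part of the vendored source) of the
// PutObjectLockConfiguration operation documented above; only Days is set on
// DefaultRetention because Days and Years cannot be combined, and the bucket and
// retention values are placeholders.
//
//    _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
//        Bucket: aws.String("examplebucket"),
//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
//            ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
//            Rule: &s3.ObjectLockRule{
//                DefaultRetention: &s3.DefaultRetention{
//                    Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
//                    Days: aws.Int64(30),
//                },
//            },
//        },
//    })
//    if err != nil {
//        // handle the awserr.Error
//    }
//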
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectRetention for more information on using the PutObjectRetention -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutObjectRetentionRequest method. -// req, resp := client.PutObjectRetentionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention -func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { - op := &request.Operation{ - Name: opPutObjectRetention, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?retention", - } - - if input == nil { - input = &PutObjectRetentionInput{} - } - - output = &PutObjectRetentionOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObjectRetention API operation for Amazon Simple Storage Service. -// -// Places an Object Retention configuration on an object. -// -// Related Resources -// -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectRetention for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention -func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { - req, out := c.PutObjectRetentionRequest(input) - return out, req.Send() -} - -// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectRetention for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { - req, out := c.PutObjectRetentionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectTagging = "PutObjectTagging" - -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectTagging for more information on using the PutObjectTagging -// API call, and error handling. 
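//
// An illustrative sketch (not part of the vendored source) of the PutObjectRetention
// operation documented above; it assumes the time package is imported, and the
// bucket, key, mode, and retain-until date are placeholders.
//
//    _, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
//        Bucket: aws.String("examplebucket"),
//        Key:    aws.String("exampleobject"),
//        Retention: &s3.ObjectLockRetention{
//            Mode:            aws.String(s3.ObjectLockRetentionModeCompliance),
//            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
//        },
//    })
//    if err != nil {
//        // handle the awserr.Error
//    }
//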
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { - op := &request.Operation{ - Name: opPutObjectTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", - } - - if input == nil { - input = &PutObjectTaggingInput{} - } - - output = &PutObjectTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObjectTagging API operation for Amazon Simple Storage Service. -// -// Sets the supplied tag-set to an object that already exists in a bucket -// -// A tag is a key-value pair. You can associate tags with an object by sending -// a PUT request against the tagging subresource that is associated with the -// object. You can retrieve tags by sending a GET request. For more information, -// see GetObjectTagging. -// -// For tagging-related restrictions related to characters and encodings, see -// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). -// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. -// -// To use this operation, you must have permission to perform the s3:PutObjectTagging -// action. By default, the bucket owner has this permission and can grant this -// permission to others. -// -// To put tags of any other version, use the versionId query parameter. You -// also need permission for the s3:PutObjectVersionTagging action. -// -// For information about the Amazon S3 object tagging feature, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). -// -// Special Errors -// -// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This -// error can occur if the tag did not pass input validation. For more information, -// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). -// -// * Code: MalformedXMLError Cause: The XML provided does not match the schema. -// -// * Code: OperationAbortedError Cause: A conflicting conditional operation -// is currently in progress against this resource. Please try again. -// -// * Code: InternalError Cause: The service was unable to apply the provided -// tag to the object. -// -// Related Resources -// -// * GetObjectTagging -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - return out, req.Send() -} - -// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of -// the ability to pass a context and additional request options. 
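//
// A minimal, illustrative sketch (not part of the vendored source) of the
// PutObjectTagging operation documented above; the bucket, key, and tag pair are
// placeholders.
//
//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
//        Bucket: aws.String("examplebucket"),
//        Key:    aws.String("exampleobject"),
//        Tagging: &s3.Tagging{
//            TagSet: []*s3.Tag{
//                {Key: aws.String("project"), Value: aws.String("demo")},
//            },
//        },
//    })
//    if err != nil {
//        // InvalidTagError, MalformedXMLError, etc. surface as awserr.Error codes
//    }
//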
-// -// See PutObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutPublicAccessBlock = "PutPublicAccessBlock" - -// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the PutPublicAccessBlock operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutPublicAccessBlockRequest method. -// req, resp := client.PutPublicAccessBlockRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { - op := &request.Operation{ - Name: opPutPublicAccessBlock, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?publicAccessBlock", - } - - if input == nil { - input = &PutPublicAccessBlockInput{} - } - - output = &PutPublicAccessBlockOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutPublicAccessBlock API operation for Amazon Simple Storage Service. -// -// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket -// or an object, it checks the PublicAccessBlock configuration for both the -// bucket (or the bucket that contains the object) and the bucket owner's account. -// If the PublicAccessBlock configurations are different between the bucket -// and the account, Amazon S3 uses the most restrictive combination of the bucket-level -// and account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
-// -// Related Resources -// -// * GetPublicAccessBlock -// -// * DeletePublicAccessBlock -// -// * GetBucketPolicyStatus -// -// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutPublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) - return out, req.Send() -} - -// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of -// the ability to pass a context and additional request options. -// -// See PutPublicAccessBlock for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRestoreObject = "RestoreObject" - -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RestoreObject for more information on using the RestoreObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RestoreObjectRequest method. -// req, resp := client.RestoreObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { - op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", - } - - if input == nil { - input = &RestoreObjectInput{} - } - - output = &RestoreObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// RestoreObject API operation for Amazon Simple Storage Service. -// -// Restores an archived copy of an object back into Amazon S3 -// -// This operation performs the following types of requests: -// -// * select - Perform a select query on an archived object -// -// * restore an archive - Restore an archived object -// -// To use this operation, you must have permissions to perform the s3:RestoreObject -// and s3:GetObject actions. 
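//
// A minimal, illustrative sketch (not part of the vendored source) of the
// PutPublicAccessBlock operation documented above, enabling every block-public-access
// setting for a placeholder bucket.
//
//    _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
//        Bucket: aws.String("examplebucket"),
//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
//            BlockPublicAcls:       aws.Bool(true),
//            BlockPublicPolicy:     aws.Bool(true),
//            IgnorePublicAcls:      aws.Bool(true),
//            RestrictPublicBuckets: aws.Bool(true),
//        },
//    })
//    if err != nil {
//        // handle the awserr.Error
//    }
//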
The bucket owner has this permission by default -// and can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Querying Archives with Select Requests -// -// You use a select type of request to perform SQL queries on archived objects. -// The archived objects that are being queried by the select request must be -// formatted as uncompressed comma-separated values (CSV) files. You can run -// queries and custom analytics on your archived data without having to restore -// your data to a hotter Amazon S3 tier. For an overview about select requests, -// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// When making a select request, do the following: -// -// * Define an output location for the select query's output. This must be -// an Amazon S3 bucket in the same AWS Region as the bucket that contains -// the archive object that is being queried. The AWS account that initiates -// the job must have permissions to write to the S3 bucket. You can specify -// the storage class and encryption for the output objects stored in the -// bucket. For more information about output, see Querying Archived Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. For more information -// about the S3 structure in the request body, see the following: PutObject -// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon Simple Storage Service Developer Guide Protecting Data Using -// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide -// -// * Define the SQL expression for the SELECT type of restoration for your -// query in the request body's SelectParameters structure. You can use expressions -// like the following examples. The following expression returns all records -// from the specified object. SELECT * FROM Object Assuming that you are -// not using any headers for data stored in the object, you can specify columns -// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > -// 100 If you have headers and you set the fileHeaderInfo in the CSV structure -// in the request body to USE, you can specify headers in the query. (If -// you set the fileHeaderInfo field to IGNORE, the first row is skipped for -// the query.) You cannot mix ordinal positions with header column names. -// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s -// -// For more information about using SQL with Glacier Select restore, see SQL -// Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// When making a select request, you can also do the following: -// -// * To expedite your queries, specify the Expedited tier. 
For more information -// about tiers, see "Restoring Archives," later in this topic. -// -// * Specify details about the data serialization format of both the input -// object that is being queried and the serialization of the CSV-encoded -// query results. -// -// The following are additional important facts about the select feature: -// -// * The output results are new Amazon S3 objects. Unlike archive retrievals, -// they are stored until explicitly deleted-manually or through a lifecycle -// policy. -// -// * You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. -// -// * Amazon S3 accepts a select request even if the object has already been -// restored. A select request doesn’t return error response 409. -// -// Restoring Archives -// -// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To -// access an archived object, you must first initiate a restore request. This -// restores a temporary copy of the archived object. In a restore request, you -// specify the number of days that you want the restored copy to exist. After -// the specified period, Amazon S3 deletes the temporary copy but the object -// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object -// was restored from. -// -// To restore a specific object version, you can provide a version ID. If you -// don't provide a version ID, Amazon S3 restores the current version. -// -// The time it takes restore jobs to finish depends on which storage class the -// object is being restored from and which data access tier you specify. -// -// When restoring an archived object (or using a select request), you can specify -// one of the following data access tier options in the Tier element of the -// request body: -// -// * Expedited - Expedited retrievals allow you to quickly access your data -// stored in the GLACIER storage class when occasional urgent requests for -// a subset of archives are required. For all but the largest archived objects -// (250 MB+), data accessed using Expedited retrievals are typically made -// available within 1–5 minutes. Provisioned capacity ensures that retrieval -// capacity for Expedited retrievals is available when you need it. Expedited -// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE -// storage class. -// -// * Standard - Standard retrievals allow you to access any of your archived -// objects within several hours. This is the default option for the GLACIER -// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval -// option. Standard retrievals typically complete within 3-5 hours from the -// GLACIER storage class and typically complete within 12 hours from the -// DEEP_ARCHIVE storage class. -// -// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval -// option, enabling you to retrieve large amounts, even petabytes, of data -// inexpensively in a day. Bulk retrievals typically complete within 5-12 -// hours from the GLACIER storage class and typically complete within 48 -// hours from the DEEP_ARCHIVE storage class. -// -// For more information about archive retrieval options and provisioned capacity -// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon Simple Storage Service Developer Guide. 
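//
// An illustrative sketch (not part of the vendored source) of initiating a restore
// with one of the data access tiers listed above; the bucket, key, tier, and number
// of days are placeholders.
//
//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
//        Bucket: aws.String("examplebucket"),
//        Key:    aws.String("archived/exampleobject"),
//        RestoreRequest: &s3.RestoreRequest{
//            Days: aws.Int64(10), // lifetime of the temporary restored copy
//            GlacierJobParameters: &s3.GlacierJobParameters{
//                Tier: aws.String(s3.TierStandard), // or s3.TierExpedited / s3.TierBulk
//            },
//        },
//    })
//    if err != nil {
//        // RestoreAlreadyInProgress is reported through the awserr.Error's Code
//    }
//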
-// -// You can use Amazon S3 restore speed upgrade to change the restore speed to -// a faster speed while it is in progress. You upgrade the speed of an in-progress -// restoration by issuing another restore request to the same object, setting -// a new Tier request element. When issuing a request to upgrade the restore -// tier, you must choose a tier that is faster than the tier that the in-progress -// restore is using. You must not change any other parameters, such as the Days -// request element. For more information, see Upgrading the Speed of an In-Progress -// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// To get the status of object restoration, you can send a HEAD request. Operations -// return the x-amz-restore header, which provides information about the restoration -// status, in the response. You can use Amazon S3 event notifications to notify -// you when a restore is initiated or completed. For more information, see Configuring -// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// After restoring an archived object, you can update the restoration period -// by reissuing the request with a new period. Amazon S3 updates the restoration -// period relative to the current time and charges only for the request-there -// are no data transfer charges. You cannot update the restoration period when -// Amazon S3 is actively processing your current restore request for the object. -// -// If your bucket has a lifecycle configuration with a rule that includes an -// expiration action, the object expiration overrides the life span that you -// specify in a restore request. For example, if you restore an object copy -// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes -// the object in 3 days. For more information about lifecycle configuration, -// see PutBucketLifecycleConfiguration and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon Simple Storage Service Developer Guide. -// -// Responses -// -// A successful operation returns either the 200 OK or 202 Accepted status code. -// -// * If the object copy is not previously restored, then Amazon S3 returns -// 202 Accepted in the response. -// -// * If the object copy is previously restored, Amazon S3 returns 200 OK -// in the response. -// -// Special Errors -// -// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. -// (This error does not apply to SELECT type requests.) HTTP Status Code: -// 409 Conflict SOAP Fault Code Prefix: Client -// -// * Code: GlacierExpeditedRetrievalNotAvailable Cause: Glacier expedited -// retrievals are currently not available. Try again later. (Returned if -// there is insufficient capacity to process the Expedited request. This -// error applies only to Expedited retrievals and not to Standard or Bulk -// retrievals.) 
HTTP Status Code: 503 SOAP Fault Code Prefix: N/A -// -// Related Resources -// -// * PutBucketLifecycleConfiguration -// -// * GetBucketNotificationConfiguration -// -// * SQL Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation RestoreObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - return out, req.Send() -} - -// RestoreObjectWithContext is the same as RestoreObject with the addition of -// the ability to pass a context and additional request options. -// -// See RestoreObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSelectObjectContent = "SelectObjectContent" - -// SelectObjectContentRequest generates a "aws/request.Request" representing the -// client's request for the SelectObjectContent operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SelectObjectContent for more information on using the SelectObjectContent -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SelectObjectContentRequest method. 
-// req, resp := client.SelectObjectContentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent -func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { - op := &request.Operation{ - Name: opSelectObjectContent, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", - } - - if input == nil { - input = &SelectObjectContentInput{} - } - - output = &SelectObjectContentOutput{} - req = c.newRequest(op, input, output) - - es := newSelectObjectContentEventStream() - req.Handlers.Unmarshal.PushBack(es.setStreamCloser) - output.EventStream = es - - req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) - req.Handlers.Unmarshal.PushBack(es.runOutputStream) - req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) - return -} - -// SelectObjectContent API operation for Amazon Simple Storage Service. -// -// This operation filters the contents of an Amazon S3 object based on a simple -// structured query language (SQL) statement. In the request, along with the -// SQL expression, you must also specify a data serialization format (JSON, -// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse -// object data into records, and returns only records that match the specified -// SQL expression. You must also specify the data serialization format for the -// response. -// -// For more information about Amazon S3 Select, see Selecting Content from Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// For more information about using SQL with Amazon S3 Select, see SQL Reference -// for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Permissions -// -// You must have s3:GetObject permission for this operation. Amazon S3 Select -// does not support anonymous access. For more information about permissions, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Object Data Formats -// -// You can use Amazon S3 Select to query objects that have the following format -// properties: -// -// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. -// -// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. -// -// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. -// GZIP and BZIP2 are the only compression formats that Amazon S3 Select -// supports for CSV and JSON files. Amazon S3 Select supports columnar compression -// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object -// compression for Parquet objects. -// -// * Server-side encryption - Amazon S3 Select supports querying objects -// that are protected with server-side encryption. For objects that are encrypted -// with customer-provided encryption keys (SSE-C), you must use HTTPS, and -// you must use the headers that are documented in the GetObject. 
For more -// information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon Simple Storage Service Developer Guide. For objects that -// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer -// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side -// encryption is handled transparently, so you don't need to specify anything. -// For more information about server-side encryption, including SSE-S3 and -// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Working with the Response Body -// -// Given the response size is unknown, Amazon S3 Select streams the response -// as a series of messages and includes a Transfer-Encoding header with chunked -// as its value in the response. For more information, see RESTSelectObjectAppendix . -// -// GetObject Support -// -// The SelectObjectContent operation does not support the following GetObject -// functionality. For more information, see GetObject. -// -// * Range: While you can specify a scan range for a Amazon S3 Select request, -// see SelectObjectContentRequest$ScanRange in the request parameters below, -// you cannot specify the range of bytes of an object to return. -// -// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot -// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. -// For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) -// in the Amazon Simple Storage Service Developer Guide. -// -// Special Errors -// -// For a list of special errors for this operation and for general information -// about Amazon S3 errors and a list of error codes, see ErrorResponses -// -// Related Resources -// -// * GetObject -// -// * GetBucketLifecycleConfiguration -// -// * PutBucketLifecycleConfiguration -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation SelectObjectContent for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent -func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { - req, out := c.SelectObjectContentRequest(input) - return out, req.Send() -} - -// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of -// the ability to pass a context and additional request options. -// -// See SelectObjectContent for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { - req, out := c.SelectObjectContentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. -type SelectObjectContentEventStream struct { - - // Reader is the EventStream reader for the SelectObjectContentEventStream - // events. This value is automatically set by the SDK when the API call is made - // Use this member when unit testing your code with the SDK to mock out the - // EventStream Reader. - // - // Must not be nil. - Reader SelectObjectContentEventStreamReader - - outputReader io.ReadCloser - - // StreamCloser is the io.Closer for the EventStream connection. For HTTP - // EventStream this is the response Body. The stream will be closed when - // the Close method of the EventStream is called. - StreamCloser io.Closer - - done chan struct{} - closeOnce sync.Once - err *eventstreamapi.OnceError -} - -func newSelectObjectContentEventStream() *SelectObjectContentEventStream { - return &SelectObjectContentEventStream{ - done: make(chan struct{}), - err: eventstreamapi.NewOnceError(), - } -} - -func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { - es.StreamCloser = r.HTTPResponse.Body -} - -func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) { - if es.done == nil { - return - } - go es.waitStreamPartClose() - -} - -func (es *SelectObjectContentEventStream) waitStreamPartClose() { - var outputErrCh <-chan struct{} - if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { - outputErrCh = v.ErrorSet() - } - var outputClosedCh <-chan struct{} - if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { - outputClosedCh = v.Closed() - } - - select { - case <-es.done: - case <-outputErrCh: - es.err.SetError(es.Reader.Err()) - es.Close() - case <-outputClosedCh: - if err := es.Reader.Err(); err != nil { - es.err.SetError(es.Reader.Err()) - } - es.Close() - } -} - -// Events returns a channel to read events from. -// -// These events are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { - return es.Reader.Events() -} - -func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { - var opts []func(*eventstream.Decoder) - if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { - opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) - } - - unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ - metadata: protocol.ResponseMetadata{ - StatusCode: r.HTTPResponse.StatusCode, - RequestID: r.RequestID, - }, - }.UnmarshalerForEventName - - decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) - eventReader := eventstreamapi.NewEventReader(decoder, - protocol.HandlerPayloadUnmarshal{ - Unmarshalers: r.Handlers.UnmarshalStream, - }, - unmarshalerForEvent, - ) - - es.outputReader = r.HTTPResponse.Body - es.Reader = newReadSelectObjectContentEventStream(eventReader) -} - -// Close closes the stream. This will also cause the stream to be closed. -// Close must be called when done using the stream API. Not calling Close -// may result in resource leaks. -// -// You can use the closing of the Reader's Events channel to terminate your -// application's read from the API's stream. 
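//
// An illustrative sketch (not part of the vendored source) of running a
// SelectObjectContent query and draining its event stream with the Events and Close
// methods defined here; the bucket, key, and SQL expression are placeholders.
//
//    func selectCSV(svc *s3.S3) error {
//        out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
//            Bucket:         aws.String("examplebucket"),
//            Key:            aws.String("data.csv"),
//            ExpressionType: aws.String(s3.ExpressionTypeSql),
//            Expression:     aws.String("SELECT * FROM S3Object s"),
//            InputSerialization: &s3.InputSerialization{
//                CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
//            },
//            OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
//        })
//        if err != nil {
//            return err
//        }
//        defer out.EventStream.Close()
//        for ev := range out.EventStream.Events() {
//            if records, ok := ev.(*s3.RecordsEvent); ok {
//                _ = records.Payload // raw bytes of the matching records
//            }
//        }
//        return out.EventStream.Err()
//    }
//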
-// -func (es *SelectObjectContentEventStream) Close() (err error) { - es.closeOnce.Do(es.safeClose) - return es.Err() -} - -func (es *SelectObjectContentEventStream) safeClose() { - if es.done != nil { - close(es.done) - } - - es.Reader.Close() - if es.outputReader != nil { - es.outputReader.Close() - } - - es.StreamCloser.Close() -} - -// Err returns any error that occurred while reading or writing EventStream -// Events from the service API's response. Returns nil if there were no errors. -func (es *SelectObjectContentEventStream) Err() error { - if err := es.err.Err(); err != nil { - return err - } - if err := es.Reader.Err(); err != nil { - return err - } - - return nil -} - -const opUploadPart = "UploadPart" - -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UploadPart for more information on using the UploadPart -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UploadPartRequest method. -// req, resp := client.UploadPartRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { - op := &request.Operation{ - Name: opUploadPart, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &UploadPartInput{} - } - - output = &UploadPartOutput{} - req = c.newRequest(op, input, output) - return -} - -// UploadPart API operation for Amazon Simple Storage Service. -// -// Uploads a part in a multipart upload. -// -// In this operation, you provide part data in your request. However, you have -// an option to specify your existing Amazon S3 object as a data source for -// the part you are uploading. To upload a part from an existing object, you -// use the UploadPartCopy operation. -// -// You must initiate a multipart upload (see CreateMultipartUpload) before you -// can upload any part. In response to your initiate request, Amazon S3 returns -// an upload ID, a unique identifier, that you must include in your upload part -// request. -// -// Part numbers can be any number from 1 to 10,000, inclusive. A part number -// uniquely identifies a part and also defines its position within the object -// being created. If you upload a new part using the same part number that was -// used with a previous part, the previously uploaded part is overwritten. Each -// part must be at least 5 MB in size, except the last part. There is no size -// limit on the last part of your multipart upload. -// -// To ensure that data is not corrupted when traversing the network, specify -// the Content-MD5 header in the upload part request. Amazon S3 checks the part -// data against the provided MD5 value. If they do not match, Amazon S3 returns -// an error. 
-// -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. -// -// For more information on multipart uploads, go to Multipart Upload Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the -// Amazon Simple Storage Service Developer Guide . -// -// For information on the permissions required to use the multipart upload API, -// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// You can optionally request server-side encryption where Amazon S3 encrypts -// your data as it writes it to disks in its data centers and decrypts it for -// you when you access it. You have the option of providing your own encryption -// key, or you can use the AWS managed encryption keys. If you choose to provide -// your own encryption key, the request headers you provide in the request must -// match the headers you used in the request to initiate the upload by using -// CreateMultipartUpload. For more information, go to Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Server-side encryption is supported by the S3 Multipart Upload actions. Unless -// you are using a customer-provided encryption key, you don't need to specify -// the encryption parameters in each UploadPart request. Instead, you only need -// to specify the server-side encryption parameters in the initial Initiate -// Multipart request. For more information, see CreateMultipartUpload. -// -// If you requested server-side encryption using a customer-provided encryption -// key in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following headers. -// -// * x-amz-server-side​-encryption​-customer-algorithm -// -// * x-amz-server-side​-encryption​-customer-key -// -// * x-amz-server-side​-encryption​-customer-key-MD5 -// -// Special Errors -// -// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. -// The upload ID might be invalid, or the multipart upload might have been -// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code -// Prefix: Client -// -// Related Resources -// -// * CreateMultipartUpload -// -// * CompleteMultipartUpload -// -// * AbortMultipartUpload -// -// * ListParts -// -// * ListMultipartUploads -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPart for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) - return out, req.Send() -} - -// UploadPartWithContext is the same as UploadPart with the addition of -// the ability to pass a context and additional request options. 
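//
// A minimal, illustrative sketch (not part of the vendored source) of uploading one
// part after initiating a multipart upload with CreateMultipartUpload; the bucket,
// key, and part data are placeholders, and every part except the last must be at
// least 5 MB, as noted above.
//
//    mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
//        Bucket: aws.String("examplebucket"),
//        Key:    aws.String("exampleobject"),
//    })
//    if err != nil {
//        // handle the awserr.Error and stop
//    }
//    _, err = svc.UploadPart(&s3.UploadPartInput{
//        Bucket:     aws.String("examplebucket"),
//        Key:        aws.String("exampleobject"),
//        UploadId:   mpu.UploadId,
//        PartNumber: aws.Int64(1),
//        Body:       bytes.NewReader(partData), // partData []byte prepared elsewhere
//    })
//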
-// -// See UploadPart for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUploadPartCopy = "UploadPartCopy" - -// UploadPartCopyRequest generates a "aws/request.Request" representing the -// client's request for the UploadPartCopy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UploadPartCopy for more information on using the UploadPartCopy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UploadPartCopyRequest method. -// req, resp := client.UploadPartCopyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { - op := &request.Operation{ - Name: opUploadPartCopy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &UploadPartCopyInput{} - } - - output = &UploadPartCopyOutput{} - req = c.newRequest(op, input, output) - return -} - -// UploadPartCopy API operation for Amazon Simple Storage Service. -// -// Uploads a part by copying data from an existing object as data source. You -// specify the data source by adding the request header x-amz-copy-source in -// your request and a byte range by adding the request header x-amz-copy-source-range -// in your request. -// -// The minimum allowable part size for a multipart upload is 5 MB. For more -// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// Instead of using an existing object as part data, you might use the UploadPart -// operation and provide data in your request. -// -// You must initiate a multipart upload before you can upload any part. In response -// to your initiate request. Amazon S3 returns a unique identifier, the upload -// ID, that you must include in your upload part request. -// -// For more information about using the UploadPartCopy operation, see the following: -// -// * For conceptual information about multipart uploads, see Uploading Objects -// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon Simple Storage Service Developer Guide. 
-// -// * For information about permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// * For information about copying objects using a single atomic operation -// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon Simple Storage Service Developer Guide. -// -// * For information about using server-side encryption with customer-provided -// encryption keys with the UploadPartCopy operation, see CopyObject and -// UploadPart. -// -// Note the following additional considerations about the request headers x-amz-copy-source-if-match, -// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and -// x-amz-copy-source-if-modified-since: -// -// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request as follows: x-amz-copy-source-if-match -// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since -// condition evaluates to false; Amazon S3 returns 200 OK and copies the -// data. -// -// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request -// as follows: x-amz-copy-source-if-none-match condition evaluates to false, -// and; x-amz-copy-source-if-modified-since condition evaluates to true; -// Amazon S3 returns 412 Precondition Failed response code. -// -// Versioning -// -// If your bucket has versioning enabled, you could have multiple versions of -// the same object. By default, x-amz-copy-source identifies the current version -// of the object to copy. If the current version is a delete marker and you -// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 -// error, because the object does not exist. If you specify versionId in the -// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns -// an HTTP 400 error, because you are not allowed to specify a delete marker -// as a version for the x-amz-copy-source. -// -// You can optionally specify a specific version of the source object to copy -// by adding the versionId subresource as shown in the following example: -// -// x-amz-copy-source: /bucket/object?versionId=version id -// -// Special Errors -// -// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. -// The upload ID might be invalid, or the multipart upload might have been -// aborted or completed. HTTP Status Code: 404 Not Found -// -// * Code: InvalidRequest Cause: The specified copy source is not supported -// as a byte-range copy source. HTTP Status Code: 400 Bad Request -// -// Related Resources -// -// * CreateMultipartUpload -// -// * UploadPart -// -// * CompleteMultipartUpload -// -// * AbortMultipartUpload -// -// * ListParts -// -// * ListMultipartUploads -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPartCopy for usage and error information. 
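A sketch of the byte-range copy described above, assuming placeholder bucket names, keys, and upload ID; the CopySource header may also carry a versionId query string as noted in the documentation:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Copy the first 5 MB of an existing object as part 1 of a multipart upload.
	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("destination-bucket"),
		Key:             aws.String("assembled-object"),
		UploadId:        aws.String("EXAMPLE-UPLOAD-ID"),
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("source-bucket/source-object"), // optionally ?versionId=...
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied part ETag: %s", aws.StringValue(out.CopyPartResult.ETag))
}
```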
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) - return out, req.Send() -} - -// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of -// the ability to pass a context and additional request options. -// -// See UploadPartCopy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Specifies the days since the initiation of an incomplete multipart upload -// that Amazon S3 will wait before permanently removing all parts of the upload. -// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon Simple Storage Service Developer Guide. -type AbortIncompleteMultipartUpload struct { - _ struct{} `type:"structure"` - - // Specifies the number of days after which Amazon S3 aborts an incomplete multipart - // upload. - DaysAfterInitiation *int64 `type:"integer"` -} - -// String returns the string representation -func (s AbortIncompleteMultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortIncompleteMultipartUpload) GoString() string { - return s.String() -} - -// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. -func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { - s.DaysAfterInitiation = &v - return s -} - -type AbortMultipartUploadInput struct { - _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` - - // The bucket name to which the upload was taking place. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key of the object for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Upload ID that identifies the multipart upload. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s AbortMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AbortMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { - s.Bucket = &v - return s -} - -func (s *AbortMultipartUploadInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { - s.UploadId = &v - return s -} - -func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *AbortMultipartUploadInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type AbortMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s AbortMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// Configures the transfer acceleration state for an Amazon S3 bucket. 
For more -// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. -type AccelerateConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies the transfer acceleration status of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` -} - -// String returns the string representation -func (s AccelerateConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccelerateConfiguration) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { - s.Status = &v - return s -} - -// Contains the elements that set the ACL permissions for an object per grantee. -type AccessControlPolicy struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - // Container for the bucket owner's display name and ID. - Owner *Owner `type:"structure"` -} - -// String returns the string representation -func (s AccessControlPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccessControlPolicy) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AccessControlPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} - if s.Grants != nil { - for i, v := range s.Grants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrants sets the Grants field's value. -func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { - s.Owner = v - return s -} - -// A container for information about access control for replicas. -type AccessControlTranslation struct { - _ struct{} `type:"structure"` - - // Specifies the replica ownership. For default and valid values, see PUT bucket - // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // in the Amazon Simple Storage Service API Reference. - // - // Owner is a required field - Owner *string `type:"string" required:"true" enum:"OwnerOverride"` -} - -// String returns the string representation -func (s AccessControlTranslation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccessControlTranslation) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AccessControlTranslation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} - if s.Owner == nil { - invalidParams.Add(request.NewErrParamRequired("Owner")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetOwner sets the Owner field's value. 
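The AbortMultipartUploadInput and AbortMultipartUploadOutput types defined above map onto a single call. A minimal sketch, with placeholder bucket, key, and upload ID, that stops an in-progress upload so its stored parts stop accruing charges:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Abort the upload; Amazon S3 then frees the already-uploaded parts.
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("large-object.bin"),
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```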
-func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { - s.Owner = &v - return s -} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates in any combination, -// and an object must match all of the predicates for the filter to apply. -type AnalyticsAndOperator struct { - _ struct{} `type:"structure"` - - // The prefix to use when evaluating an AND predicate: The prefix that an object - // must have to be included in the metrics results. - Prefix *string `type:"string"` - - // The list of tags to use when evaluating an AND predicate. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s AnalyticsAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { - s.Tags = v - return s -} - -// Specifies the configuration and any analyses for the analytics filter of -// an Amazon S3 bucket. -type AnalyticsConfiguration struct { - _ struct{} `type:"structure"` - - // The filter used to describe a set of objects for analyses. A filter must - // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). - // If no filter is provided, all objects will be considered in any analysis. - Filter *AnalyticsFilter `type:"structure"` - - // The ID that identifies the analytics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // Contains data related to access patterns to be collected and made available - // to analyze the tradeoffs between different storage classes. - // - // StorageClassAnalysis is a required field - StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` -} - -// String returns the string representation -func (s AnalyticsConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AnalyticsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.StorageClassAnalysis == nil { - invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - if s.StorageClassAnalysis != nil { - if err := s.StorageClassAnalysis.Validate(); err != nil { - invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilter sets the Filter field's value. -func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { - s.Id = &v - return s -} - -// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. -func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { - s.StorageClassAnalysis = v - return s -} - -// Where to publish the analytics results. -type AnalyticsExportDestination struct { - _ struct{} `type:"structure"` - - // A destination signifying output to an S3 bucket. - // - // S3BucketDestination is a required field - S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` -} - -// String returns the string representation -func (s AnalyticsExportDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsExportDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsExportDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} - if s.S3BucketDestination == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) - } - if s.S3BucketDestination != nil { - if err := s.S3BucketDestination.Validate(); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3BucketDestination sets the S3BucketDestination field's value. -func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { - s.S3BucketDestination = v - return s -} - -// The filter used to describe a set of objects for analyses. A filter must -// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). -// If no filter is provided, all objects will be considered in any analysis. -type AnalyticsFilter struct { - _ struct{} `type:"structure"` - - // A conjunction (logical AND) of predicates, which is used in evaluating an - // analytics filter. The operator must have at least two predicates. - And *AnalyticsAndOperator `type:"structure"` - - // The prefix to use when evaluating an analytics filter. - Prefix *string `type:"string"` - - // The tag to use when evaluating an analytics filter. 
- Tag *Tag `type:"structure"` -} - -// String returns the string representation -func (s AnalyticsFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { - s.Tag = v - return s -} - -// Contains information about where to publish the analytics results. -type AnalyticsS3BucketDestination struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the bucket to which data is exported. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // The account ID that owns the destination bucket. If no account ID is provided, - // the owner will not be validated prior to exporting data. - BucketAccountId *string `type:"string"` - - // Specifies the file format used when exporting data to Amazon S3. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - - // The prefix to use when exporting data. The prefix is prepended to all results. - Prefix *string `type:"string"` -} - -// String returns the string representation -func (s AnalyticsS3BucketDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AnalyticsS3BucketDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { - s.Bucket = &v - return s -} - -func (s *AnalyticsS3BucketDestination) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketAccountId sets the BucketAccountId field's value. -func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { - s.BucketAccountId = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { - s.Format = &v - return s -} - -// SetPrefix sets the Prefix field's value. 
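Putting the analytics types above together: a hedged sketch that stores a storage-class analysis configuration on a bucket, exporting CSV results to another bucket. The bucket names, configuration ID, prefix, and destination ARN are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Analyze objects under "reports/" and export results to a second bucket.
	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("reports-analysis"),
		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
			Id:     aws.String("reports-analysis"),
			Filter: &s3.AnalyticsFilter{Prefix: aws.String("reports/")},
			StorageClassAnalysis: &s3.StorageClassAnalysis{
				DataExport: &s3.StorageClassAnalysisDataExport{
					OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
					Destination: &s3.AnalyticsExportDestination{
						S3BucketDestination: &s3.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::analytics-results-bucket"),
							Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
							Prefix: aws.String("storage-class-analysis/"),
						},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```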
-func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { - s.Prefix = &v - return s -} - -// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name -// is globally unique, and the namespace is shared by all AWS accounts. -type Bucket struct { - _ struct{} `type:"structure"` - - // Date the bucket was created. - CreationDate *time.Time `type:"timestamp"` - - // The name of the bucket. - Name *string `type:"string"` -} - -// String returns the string representation -func (s Bucket) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Bucket) GoString() string { - return s.String() -} - -// SetCreationDate sets the CreationDate field's value. -func (s *Bucket) SetCreationDate(v time.Time) *Bucket { - s.CreationDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *Bucket) SetName(v string) *Bucket { - s.Name = &v - return s -} - -// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. -type BucketLifecycleConfiguration struct { - _ struct{} `type:"structure"` - - // A lifecycle rule for individual objects in an Amazon S3 bucket. - // - // Rules is a required field - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s BucketLifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BucketLifecycleConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { - s.Rules = v - return s -} - -// Container for logging status information. -type BucketLoggingStatus struct { - _ struct{} `type:"structure"` - - // Describes where logs are stored and the prefix that Amazon S3 assigns to - // all log object keys for a bucket. For more information, see PUT Bucket logging - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon Simple Storage Service API Reference. - LoggingEnabled *LoggingEnabled `type:"structure"` -} - -// String returns the string representation -func (s BucketLoggingStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BucketLoggingStatus) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
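BucketLifecycleConfiguration ties back to the AbortIncompleteMultipartUpload type documented earlier in this file. A minimal sketch of a single lifecycle rule that aborts incomplete multipart uploads after seven days; the bucket name and rule ID are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stale-multipart-uploads"),
				Status: aws.String(s3.ExpirationStatusEnabled),
				// An empty prefix applies the rule to every object in the bucket.
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```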
-func (s *BucketLoggingStatus) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} - if s.LoggingEnabled != nil { - if err := s.LoggingEnabled.Validate(); err != nil { - invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { - s.LoggingEnabled = v - return s -} - -// Describes the cross-origin access configuration for objects in an Amazon -// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. -type CORSConfiguration struct { - _ struct{} `type:"structure"` - - // A set of origins and methods (cross-origin access that you want to allow). - // You can add up to 100 rules to the configuration. - // - // CORSRules is a required field - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s CORSConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CORSConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CORSConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} - if s.CORSRules == nil { - invalidParams.Add(request.NewErrParamRequired("CORSRules")) - } - if s.CORSRules != nil { - for i, v := range s.CORSRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCORSRules sets the CORSRules field's value. -func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { - s.CORSRules = v - return s -} - -// Specifies a cross-origin access rule for an Amazon S3 bucket. -type CORSRule struct { - _ struct{} `type:"structure"` - - // Headers that are specified in the Access-Control-Request-Headers header. - // These headers are allowed in a preflight OPTIONS request. In response to - // any preflight OPTIONS request, Amazon S3 returns any requested headers that - // are allowed. - AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - - // An HTTP method that you allow the origin to execute. Valid values are GET, - // PUT, HEAD, POST, and DELETE. - // - // AllowedMethods is a required field - AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` - - // One or more origins you want customers to be able to access the bucket from. - // - // AllowedOrigins is a required field - AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` - - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). - ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` - - // The time in seconds that your browser is to cache the preflight response - // for the specified resource. 
- MaxAgeSeconds *int64 `type:"integer"` -} - -// String returns the string representation -func (s CORSRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CORSRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CORSRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSRule"} - if s.AllowedMethods == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) - } - if s.AllowedOrigins == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAllowedHeaders sets the AllowedHeaders field's value. -func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { - s.AllowedHeaders = v - return s -} - -// SetAllowedMethods sets the AllowedMethods field's value. -func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { - s.AllowedMethods = v - return s -} - -// SetAllowedOrigins sets the AllowedOrigins field's value. -func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { - s.AllowedOrigins = v - return s -} - -// SetExposeHeaders sets the ExposeHeaders field's value. -func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { - s.ExposeHeaders = v - return s -} - -// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. -func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { - s.MaxAgeSeconds = &v - return s -} - -// Describes how an uncompressed comma-separated values (CSV)-formatted input -// object is formatted. -type CSVInput struct { - _ struct{} `type:"structure"` - - // Specifies that CSV field values may contain quoted record delimiters and - // such records should be allowed. Default value is FALSE. Setting this value - // to TRUE may lower performance. - AllowQuotedRecordDelimiter *bool `type:"boolean"` - - // A single character used to indicate that a row should be ignored when the - // character is present at the start of that row. You can specify any character - // to indicate a comment line. - Comments *string `type:"string"` - - // A single character used to separate individual fields in a record. You can - // specify an arbitrary delimiter. - FieldDelimiter *string `type:"string"` - - // Describes the first line of input. Valid values are: - // - // * NONE: First line is not a header. - // - // * IGNORE: First line is a header, but you can't use the header values - // to indicate the column in an expression. You can use column position (such - // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). - // - // * Use: First line is a header, and you can use the header value to identify - // a column in an expression (SELECT "name" FROM OBJECT). - FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` - - // A single character used for escaping when the field delimiter is part of - // the value. For example, if the value is a, b, Amazon S3 wraps this field - // value in quotation marks, as follows: " a , b ". - // - // Type: String - // - // Default: " - // - // Ancestors: CSV - QuoteCharacter *string `type:"string"` - - // A single character used for escaping the quotation mark character inside - // an already escaped value. For example, the value """ a , b """ is parsed - // as " a , b ". - QuoteEscapeCharacter *string `type:"string"` - - // A single character used to separate individual records in the input. 
Instead - // of the default value, you can specify an arbitrary delimiter. - RecordDelimiter *string `type:"string"` -} - -// String returns the string representation -func (s CSVInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CSVInput) GoString() string { - return s.String() -} - -// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. -func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { - s.AllowQuotedRecordDelimiter = &v - return s -} - -// SetComments sets the Comments field's value. -func (s *CSVInput) SetComments(v string) *CSVInput { - s.Comments = &v - return s -} - -// SetFieldDelimiter sets the FieldDelimiter field's value. -func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { - s.FieldDelimiter = &v - return s -} - -// SetFileHeaderInfo sets the FileHeaderInfo field's value. -func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { - s.FileHeaderInfo = &v - return s -} - -// SetQuoteCharacter sets the QuoteCharacter field's value. -func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { - s.QuoteCharacter = &v - return s -} - -// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. -func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { - s.QuoteEscapeCharacter = &v - return s -} - -// SetRecordDelimiter sets the RecordDelimiter field's value. -func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { - s.RecordDelimiter = &v - return s -} - -// Describes how uncompressed comma-separated values (CSV)-formatted results -// are formatted. -type CSVOutput struct { - _ struct{} `type:"structure"` - - // The value used to separate individual fields in a record. You can specify - // an arbitrary delimiter. - FieldDelimiter *string `type:"string"` - - // A single character used for escaping when the field delimiter is part of - // the value. For example, if the value is a, b, Amazon S3 wraps this field - // value in quotation marks, as follows: " a , b ". - QuoteCharacter *string `type:"string"` - - // The single character used for escaping the quote character inside an already - // escaped value. - QuoteEscapeCharacter *string `type:"string"` - - // Indicates whether to use quotation marks around output fields. - // - // * ALWAYS: Always use quotation marks for output fields. - // - // * ASNEEDED: Use quotation marks for output fields when needed. - QuoteFields *string `type:"string" enum:"QuoteFields"` - - // A single character used to separate individual records in the output. Instead - // of the default value, you can specify an arbitrary delimiter. - RecordDelimiter *string `type:"string"` -} - -// String returns the string representation -func (s CSVOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CSVOutput) GoString() string { - return s.String() -} - -// SetFieldDelimiter sets the FieldDelimiter field's value. -func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { - s.FieldDelimiter = &v - return s -} - -// SetQuoteCharacter sets the QuoteCharacter field's value. -func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { - s.QuoteCharacter = &v - return s -} - -// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. -func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { - s.QuoteEscapeCharacter = &v - return s -} - -// SetQuoteFields sets the QuoteFields field's value. 
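The CSVInput and CSVOutput types above are consumed by S3 Select. A hedged sketch of a SelectObjectContent call that reads a header-bearing CSV object and streams the matching records back; the bucket, key, and SQL expression are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),
		Key:            aws.String("people.csv"),
		Expression:     aws.String("SELECT s.name FROM S3Object s WHERE s.age > '30'"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			// USE: header values can be referenced by name in the expression.
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{RecordDelimiter: aws.String("\n")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	// Records arrive as events on the stream; print each payload as it comes in.
	for ev := range out.EventStream.Events() {
		if rec, ok := ev.(*s3.RecordsEvent); ok {
			fmt.Print(string(rec.Payload))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}
```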
-func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { - s.QuoteFields = &v - return s -} - -// SetRecordDelimiter sets the RecordDelimiter field's value. -func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { - s.RecordDelimiter = &v - return s -} - -// Container for specifying the AWS Lambda notification configuration. -type CloudFunctionConfiguration struct { - _ struct{} `type:"structure"` - - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. - CloudFunction *string `type:"string"` - - // The bucket event for which to send notifications. - // - // Deprecated: Event has been deprecated - Event *string `deprecated:"true" type:"string" enum:"Event"` - - // Bucket events for which to send notifications. - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The role supporting the invocation of the Lambda function - InvocationRole *string `type:"string"` -} - -// String returns the string representation -func (s CloudFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CloudFunctionConfiguration) GoString() string { - return s.String() -} - -// SetCloudFunction sets the CloudFunction field's value. -func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { - s.CloudFunction = &v - return s -} - -// SetEvent sets the Event field's value. -func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { - s.Event = &v - return s -} - -// SetEvents sets the Events field's value. -func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { - s.Events = v - return s -} - -// SetId sets the Id field's value. -func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { - s.Id = &v - return s -} - -// SetInvocationRole sets the InvocationRole field's value. -func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { - s.InvocationRole = &v - return s -} - -// Container for all (if there are any) keys between Prefix and the next occurrence -// of the string specified by a delimiter. CommonPrefixes lists keys that act -// like subdirectories in the directory specified by Prefix. For example, if -// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, -// the common prefix is notes/summer/. -type CommonPrefix struct { - _ struct{} `type:"structure"` - - // Container for the specified common prefix. - Prefix *string `type:"string"` -} - -// String returns the string representation -func (s CommonPrefix) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CommonPrefix) GoString() string { - return s.String() -} - -// SetPrefix sets the Prefix field's value. -func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { - s.Prefix = &v - return s -} - -type CompleteMultipartUploadInput struct { - _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` - - // Name of the bucket to which the multipart upload was initiated. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The container for the multipart upload request information. - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // ID for the initiated multipart upload. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s CompleteMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CompleteMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { - s.Bucket = &v - return s -} - -func (s *CompleteMultipartUploadInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { - s.Key = &v - return s -} - -// SetMultipartUpload sets the MultipartUpload field's value. -func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { - s.MultipartUpload = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetUploadId sets the UploadId field's value. 
-func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { - s.UploadId = &v - return s -} - -func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type CompleteMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // The name of the bucket that contains the newly created object. - Bucket *string `type:"string"` - - // Entity tag that identifies the newly created object's data. Objects with - // different object data will have different entity tags. The entity tag is - // an opaque string. The entity tag may or may not be an MD5 digest of the object - // data. If the entity tag is not an MD5 digest of the object data, it will - // contain one or more nonhexadecimal characters and/or will consist of less - // than 32 or more than 32 hexadecimal digits. - ETag *string `type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The object key of the newly created object. - Key *string `min:"1" type:"string"` - - // The URI that identifies the newly created object. - Location *string `type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart - // upload request, the response includes this header. It confirms the encryption - // algorithm that Amazon S3 used to encrypt the object. - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version ID of the newly created object, in case the bucket has versioning - // turned on. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s CompleteMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { - s.Bucket = &v - return s -} - -func (s *CompleteMultipartUploadOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetETag sets the ETag field's value. -func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. 
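CompleteMultipartUpload assembles the previously uploaded parts using the CompletedMultipartUpload and CompletedPart types shown here. A minimal sketch; the bucket, key, upload ID, and ETags are placeholders collected from earlier UploadPart responses, listed in part-number order:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("large-object.bin"),
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{PartNumber: aws.Int64(1), ETag: aws.String(`"etag-of-part-1"`)},
				{PartNumber: aws.Int64(2), ETag: aws.String(`"etag-of-part-2"`)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("assembled object at %s", aws.StringValue(out.Location))
}
```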
-func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { - s.Expiration = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { - s.Key = &v - return s -} - -// SetLocation sets the Location field's value. -func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { - s.Location = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { - s.VersionId = &v - return s -} - -// The container for the completed multipart upload details. -type CompletedMultipartUpload struct { - _ struct{} `type:"structure"` - - // Array of CompletedPart data types. - Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s CompletedMultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompletedMultipartUpload) GoString() string { - return s.String() -} - -// SetParts sets the Parts field's value. -func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { - s.Parts = v - return s -} - -// Details of the parts that were uploaded. -type CompletedPart struct { - _ struct{} `type:"structure"` - - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Part number that identifies the part. This is a positive integer between - // 1 and 10,000. - PartNumber *int64 `type:"integer"` -} - -// String returns the string representation -func (s CompletedPart) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompletedPart) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *CompletedPart) SetETag(v string) *CompletedPart { - s.ETag = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { - s.PartNumber = &v - return s -} - -// A container for describing a condition that must be met for the specified -// redirect to apply. For example, 1. If request is for pages in the /docs folder, -// redirect to the /documents folder. 2. If request results in HTTP error 4xx, -// redirect request to another host where you might process the error. -type Condition struct { - _ struct{} `type:"structure"` - - // The HTTP error code when the redirect is applied. In the event of an error, - // if the error code equals this value, then the specified redirect is applied. - // Required when parent element Condition is specified and sibling KeyPrefixEquals - // is not specified. If both are specified, then both must be true for the redirect - // to be applied. 
- HttpErrorCodeReturnedEquals *string `type:"string"` - - // The object key name prefix when the redirect is applied. For example, to - // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. - // To redirect request for all pages with the prefix docs/, the key prefix will - // be /docs, which identifies all objects in the docs/ folder. Required when - // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals - // is not specified. If both conditions are specified, both must be true for - // the redirect to be applied. - KeyPrefixEquals *string `type:"string"` -} - -// String returns the string representation -func (s Condition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Condition) GoString() string { - return s.String() -} - -// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value. -func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition { - s.HttpErrorCodeReturnedEquals = &v - return s -} - -// SetKeyPrefixEquals sets the KeyPrefixEquals field's value. -func (s *Condition) SetKeyPrefixEquals(v string) *Condition { - s.KeyPrefixEquals = &v - return s -} - -type ContinuationEvent struct { - _ struct{} `locationName:"ContinuationEvent" type:"structure"` -} - -// String returns the string representation -func (s ContinuationEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ContinuationEvent) GoString() string { - return s.String() -} - -// The ContinuationEvent is and event in the SelectObjectContentEventStream group of events. -func (s *ContinuationEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value. -// This method is only used internally within the SDK's EventStream handling. -func (s *ContinuationEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - return nil -} - -func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - return msg, err -} - -type CopyObjectInput struct { - _ struct{} `locationName:"CopyObjectRequest" type:"structure"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // The name of the destination bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. 
- ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. - // - // CopySource is a required field - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` - - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` - - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. - CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` - - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` - - // Specifies the algorithm to use when decrypting the source object (for example, - // AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. - CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // The key of the destination object. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Specifies whether the metadata is copied from the source object or replaced - // with metadata provided in the request. 
- MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - - // Specifies whether you want to apply a Legal Hold to the copied object. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode that you want to apply to the copied object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when you want the copied object's Object Lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS Encryption Context to use for object encryption. The - // value of this header is a base64-encoded UTF-8 string holding JSON with the - // encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. For information about configuring using any of the officially - // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request - // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 Developer Guide. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). 
- ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The tag-set for the object destination object this value must be used in - // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters. - Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - - // Specifies whether the object tag-set are copied from the source object or - // replaced with tag-set provided in the request. - TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s CopyObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CopyObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.CopySource == nil { - invalidParams.Add(request.NewErrParamRequired("CopySource")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { - s.ACL = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { - s.Bucket = &v - return s -} - -func (s *CopyObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCacheControl sets the CacheControl field's value. -func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { - s.ContentLanguage = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { - s.ContentType = &v - return s -} - -// SetCopySource sets the CopySource field's value. 
-func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { - s.CopySource = &v - return s -} - -// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. -func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { - s.CopySourceIfMatch = &v - return s -} - -// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. -func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfModifiedSince = &v - return s -} - -// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. -func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { - s.CopySourceIfNoneMatch = &v - return s -} - -// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. -func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfUnmodifiedSince = &v - return s -} - -// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { - s.CopySourceSSECustomerAlgorithm = &v - return s -} - -// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { - s.CopySourceSSECustomerKey = &v - return s -} - -func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { - if s.CopySourceSSECustomerKey == nil { - return v - } - return *s.CopySourceSSECustomerKey -} - -// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { - s.CopySourceSSECustomerKeyMD5 = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { - s.Metadata = v - return s -} - -// SetMetadataDirective sets the MetadataDirective field's value. -func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { - s.MetadataDirective = &v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. 
-func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *CopyObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { - s.Tagging = &v - return s -} - -// SetTaggingDirective sets the TaggingDirective field's value. -func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { - s.TaggingDirective = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { - s.WebsiteRedirectLocation = &v - return s -} - -func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *CopyObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type CopyObjectOutput struct { - _ struct{} `type:"structure" payload:"CopyObjectResult"` - - // Container for all response elements. - CopyObjectResult *CopyObjectResult `type:"structure"` - - // Version of the copied object in the destination bucket. - CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If the object expiration is configured, the response includes this header. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the AWS KMS Encryption Context to use for object encryption. - // The value of this header is a base64-encoded UTF-8 string holding JSON with - // the encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version ID of the newly created copy. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s CopyObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectOutput) GoString() string { - return s.String() -} - -// SetCopyObjectResult sets the CopyObjectResult field's value. -func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { - s.CopyObjectResult = v - return s -} - -// SetCopySourceVersionId sets the CopySourceVersionId field's value. -func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { - s.CopySourceVersionId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { - s.Expiration = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
-func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { - s.VersionId = &v - return s -} - -// Container for all response elements. -type CopyObjectResult struct { - _ struct{} `type:"structure"` - - // Returns the ETag of the new object. The ETag reflects only changes to the - // contents of an object, not its metadata. The source and destination ETag - // is identical for a successfully copied object. - ETag *string `type:"string"` - - // Returns the date that the object was last modified. - LastModified *time.Time `type:"timestamp"` -} - -// String returns the string representation -func (s CopyObjectResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectResult) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { - s.LastModified = &v - return s -} - -// Container for all response elements. -type CopyPartResult struct { - _ struct{} `type:"structure"` - - // Entity tag of the object. - ETag *string `type:"string"` - - // Date and time at which the object was uploaded. - LastModified *time.Time `type:"timestamp"` -} - -// String returns the string representation -func (s CopyPartResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyPartResult) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *CopyPartResult) SetETag(v string) *CopyPartResult { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { - s.LastModified = &v - return s -} - -// The configuration information for the bucket. -type CreateBucketConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies the Region where the bucket will be created. If you don't specify - // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` -} - -// String returns the string representation -func (s CreateBucketConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketConfiguration) GoString() string { - return s.String() -} - -// SetLocationConstraint sets the LocationConstraint field's value. -func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { - s.LocationConstraint = &v - return s -} - -type CreateBucketInput struct { - _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - // The name of the bucket to create. 
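Editorial aside: a short sketch of a `CopyObject` call using the input and output types above. Bucket and key names are placeholders; as the `CopySource` documentation notes, it takes the form `source-bucket/source-key` and keys with special characters must be URL-encoded.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Copy a placeholder source object into a placeholder destination bucket.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("example-destination-bucket"),
		Key:        aws.String("backups/report.csv"),
		CopySource: aws.String("example-source-bucket/reports/report.csv"),
		ACL:        aws.String(s3.ObjectCannedACLPrivate),
	})
	if err != nil {
		log.Fatal(err)
	}
	// CopyObjectResult carries the new object's ETag and LastModified.
	fmt.Println(out)
}
```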
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The configuration information for the bucket. - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Specifies whether you want S3 Object Lock to be enabled for the new bucket. - ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` -} - -// String returns the string representation -func (s CreateBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { - s.ACL = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { - s.Bucket = &v - return s -} - -func (s *CreateBucketInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. -func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { - s.CreateBucketConfiguration = v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. 
-func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { - s.GrantWriteACP = &v - return s -} - -// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. -func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { - s.ObjectLockEnabledForBucket = &v - return s -} - -type CreateBucketOutput struct { - _ struct{} `type:"structure"` - - // Specifies the Region where the bucket will be created. If you are creating - // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need - // to specify the location. - Location *string `location:"header" locationName:"Location" type:"string"` -} - -// String returns the string representation -func (s CreateBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketOutput) GoString() string { - return s.String() -} - -// SetLocation sets the Location field's value. -func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { - s.Location = &v - return s -} - -type CreateMultipartUploadInput struct { - _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // The name of the bucket to which to initiate the upload - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Object key for which the multipart upload is to be initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. 
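Editorial aside: a sketch of `CreateBucket` with the `CreateBucketConfiguration` shown above. The bucket name is a placeholder; outside us-east-1 the target Region is passed as a `LocationConstraint`, and the wait call at the end simply blocks until the new bucket is visible.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("eu-west-1")}))
	svc := s3.New(sess)

	// Create a placeholder bucket in eu-west-1.
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("example-bucket-name"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(s3.BucketLocationConstraintEuWest1),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Block until the bucket exists before using it.
	if err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
		Bucket: aws.String("example-bucket-name"),
	}); err != nil {
		log.Fatal(err)
	}
}
```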
- Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Specifies whether you want to apply a Legal Hold to the uploaded object. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // Specifies the Object Lock mode that you want to apply to the uploaded object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // Specifies the date and time when you want the Object Lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS Encryption Context to use for object encryption. The - // value of this header is a base64-encoded UTF-8 string holding JSON with the - // encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for - // object encryption. All GET and PUT requests for an object protected by AWS - // KMS will fail if not made via SSL or using SigV4. For information about configuring - // using any of the officially supported AWS SDKs and AWS CLI, see Specifying - // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 Developer Guide. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). 
- ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s CreateMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { - s.ACL = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { - s.Bucket = &v - return s -} - -func (s *CreateMultipartUploadInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCacheControl sets the CacheControl field's value. -func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { - s.ContentLanguage = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { - s.ContentType = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. 
-func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { - s.Metadata = v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { - s.SSECustomerKey = &v - return s -} - -func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. 
-func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { - s.Tagging = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { - s.WebsiteRedirectLocation = &v - return s -} - -func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *CreateMultipartUploadInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type CreateMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // If the bucket has a lifecycle rule configured with an action to abort incomplete - // multipart uploads and the prefix in the lifecycle rule matches the object - // name in the request, the response includes this header. The header indicates - // when the initiated multipart upload becomes eligible for an abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - // - // The response also includes the x-amz-abort-rule-id header that provides the - // ID of the lifecycle configuration rule that defines this action. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - - // This header is returned along with the x-amz-abort-date header. It identifies - // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // Name of the bucket to which the multipart upload was initiated. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - Bucket *string `locationName:"Bucket" type:"string"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. 
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the AWS KMS Encryption Context to use for object encryption. - // The value of this header is a base64-encoded UTF-8 string holding JSON with - // the encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // ID for the initiated multipart upload. - UploadId *string `type:"string"` -} - -// String returns the string representation -func (s CreateMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetAbortDate sets the AbortDate field's value. -func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { - s.AbortDate = &v - return s -} - -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { - s.AbortRuleId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { - s.Bucket = &v - return s -} - -func (s *CreateMultipartUploadOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { - s.Key = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
-func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { - s.ServerSideEncryption = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { - s.UploadId = &v - return s -} - -// The container element for specifying the default Object Lock retention settings -// for new objects placed in the specified bucket. -type DefaultRetention struct { - _ struct{} `type:"structure"` - - // The number of days that you want to specify for the default retention period. - Days *int64 `type:"integer"` - - // The default Object Lock retention mode you want to apply to new objects placed - // in the specified bucket. - Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - - // The number of years that you want to specify for the default retention period. - Years *int64 `type:"integer"` -} - -// String returns the string representation -func (s DefaultRetention) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DefaultRetention) GoString() string { - return s.String() -} - -// SetDays sets the Days field's value. -func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { - s.Days = &v - return s -} - -// SetMode sets the Mode field's value. -func (s *DefaultRetention) SetMode(v string) *DefaultRetention { - s.Mode = &v - return s -} - -// SetYears sets the Years field's value. -func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { - s.Years = &v - return s -} - -// Container for the objects to delete. -type Delete struct { - _ struct{} `type:"structure"` - - // The objects to delete. - // - // Objects is a required field - Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` - - // Element to enable quiet mode for the request. When you add this element, - // you must set its value to true. - Quiet *bool `type:"boolean"` -} - -// String returns the string representation -func (s Delete) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Delete) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Delete) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Delete"} - if s.Objects == nil { - invalidParams.Add(request.NewErrParamRequired("Objects")) - } - if s.Objects != nil { - for i, v := range s.Objects { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetObjects sets the Objects field's value. -func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { - s.Objects = v - return s -} - -// SetQuiet sets the Quiet field's value. -func (s *Delete) SetQuiet(v bool) *Delete { - s.Quiet = &v - return s -} - -type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` - - // The name of the bucket from which an analytics configuration is deleted. 
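Editorial aside: a sketch of initiating a multipart upload with the `CreateMultipartUploadInput`/`Output` types above. Bucket, key, content type, and storage class are placeholders; the `UploadPart` and `CompleteMultipartUpload` calls that consume the returned `UploadId` are defined elsewhere in this file.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Initiate the upload for a placeholder key.
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:       aws.String("example-bucket"),
		Key:          aws.String("archive/large-object.bin"),
		ContentType:  aws.String("application/octet-stream"),
		StorageClass: aws.String(s3.StorageClassStandardIa),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The UploadId ties later UploadPart and CompleteMultipartUpload
	// calls back to this upload.
	fmt.Println(aws.StringValue(create.UploadId))
}
```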
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID that identifies the analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -type DeleteBucketCorsInput struct { - _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` - - // Specifies the bucket whose cors configuration is being deleted. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
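Editorial aside: a sketch of the `Delete` container shown a little earlier, as used by `DeleteObjects` (whose input type is defined elsewhere in this file). The bucket and keys are placeholders; `Quiet` suppresses the per-object success entries in the response, as the field documentation describes.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Batch-delete two placeholder keys in quiet mode.
	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("tmp/a.txt")},
				{Key: aws.String("tmp/b.txt")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```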
-func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketCorsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketCorsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketCorsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketCorsOutput) GoString() string { - return s.String() -} - -type DeleteBucketEncryptionInput struct { - _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` - - // The name of the bucket containing the server-side encryption configuration - // to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketEncryptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketEncryptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketEncryptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketEncryptionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketEncryptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketEncryptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketEncryptionOutput) GoString() string { - return s.String() -} - -type DeleteBucketInput struct { - _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` - - // Specifies the bucket being deleted. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` - - // The name of the bucket containing the inventory configuration to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. 
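Editorial aside: a sketch of `DeleteBucket` using the input type above, including an explicit `Validate` call of the kind generated for every input struct in this file. The bucket name is a placeholder and the bucket must already be empty for the call to succeed.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	input := &s3.DeleteBucketInput{Bucket: aws.String("example-bucket")}

	// The SDK runs Validate automatically before sending; calling it
	// directly surfaces missing required fields up front.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.DeleteBucket(input); err != nil {
		log.Fatal(err)
	}
}
```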
-func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -type DeleteBucketLifecycleInput struct { - _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` - - // The bucket name of the lifecycle to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketLifecycleInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketLifecycleOutput) GoString() string { - return s.String() -} - -type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` - - // The name of the bucket containing the metrics configuration to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. 
- // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -type DeleteBucketOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketOutput) GoString() string { - return s.String() -} - -type DeleteBucketPolicyInput struct { - _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketPolicyInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketPolicyOutput) GoString() string { - return s.String() -} - -type DeleteBucketReplicationInput struct { - _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketReplicationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketReplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketReplicationOutput) GoString() string { - return s.String() -} - -type DeleteBucketTaggingInput struct { - _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` - - // The bucket that has the tag set to be removed. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketTaggingOutput) GoString() string { - return s.String() -} - -type DeleteBucketWebsiteInput struct { - _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` - - // The bucket name for which you want to remove the website configuration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
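Every DeleteBucket* input above follows the same generated pattern: a single required Bucket field, fluent Set* helpers that return the receiver, and a Validate method that the SDK's request handlers run before the call is sent. A rough usage sketch (bucket name and region are placeholders, not taken from this diff), removing a bucket's tag set:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Shared session; credentials come from the environment, region is a placeholder.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Build the input with the generated fluent setter; Validate runs
	// automatically when the request is sent.
	input := &s3.DeleteBucketTaggingInput{}
	input.SetBucket("example-bucket") // placeholder bucket name

	if _, err := svc.DeleteBucketTagging(input); err != nil {
		log.Fatalf("delete bucket tagging: %v", err)
	}
	fmt.Println("tag set removed")
}
```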
-func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketWebsiteInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketWebsiteOutput) GoString() string { - return s.String() -} - -// Information about the delete marker. -type DeleteMarkerEntry struct { - _ struct{} `type:"structure"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp"` - - // The account that created the delete marker.> - Owner *Owner `type:"structure"` - - // Version ID of an object. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s DeleteMarkerEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMarkerEntry) GoString() string { - return s.String() -} - -// SetIsLatest sets the IsLatest field's value. -func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { - s.IsLatest = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { - s.Owner = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { - s.VersionId = &v - return s -} - -// Specifies whether Amazon S3 replicates the delete markers. If you specify -// a Filter, you must specify this element. However, in the latest version of -// replication configuration (when Filter is specified), Amazon S3 doesn't replicate -// delete markers. Therefore, the DeleteMarkerReplication element can contain -// only Disabled. For an example configuration, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). -// -// If you don't specify the Filter element, Amazon S3 assumes that the replication -// configuration is the earlier version, V1. In the earlier version, Amazon -// S3 handled replication of delete markers differently. For more information, -// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). -type DeleteMarkerReplication struct { - _ struct{} `type:"structure"` - - // Indicates whether to replicate delete markers. 
- // - // In the current implementation, Amazon S3 doesn't replicate the delete markers. - // The status must be Disabled. - Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` -} - -// String returns the string representation -func (s DeleteMarkerReplication) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMarkerReplication) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { - s.Status = &v - return s -} - -type DeleteObjectInput struct { - _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` - - // The bucket name of the bucket containing the object. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates whether S3 Object Lock should bypass Governance-mode restrictions - // to process this operation. - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - - // Key name of the object to delete. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { - s.Bucket = &v - return s -} - -func (s *DeleteObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { - s.BypassGovernanceRetention = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { - s.Key = &v - return s -} - -// SetMFA sets the MFA field's value. -func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { - s.MFA = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { - s.VersionId = &v - return s -} - -func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteObjectOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectOutput) GoString() string { - return s.String() -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { - s.RequestCharged = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
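For the object-level call, a hedged sketch of deleting one specific object version with the fields documented above; svc is an *s3.S3 client built as in the earlier sketch, and the bucket, key, and version ID are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// deleteObjectVersion removes one specific version of an object. The Bucket
// field could also carry an access point ARN, as the field documentation notes.
func deleteObjectVersion(svc *s3.S3) {
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("logs/2020/01/app.log"),
		VersionId: aws.String("example-version-id"),
	})
	if err != nil {
		log.Fatalf("delete object: %v", err)
	}
	// Both output fields are optional pointers; the aws helpers handle nil.
	fmt.Println("delete marker:", aws.BoolValue(out.DeleteMarker),
		"version:", aws.StringValue(out.VersionId))
}
```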
-func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { - s.VersionId = &v - return s -} - -type DeleteObjectTaggingInput struct { - _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` - - // The bucket name containing the objects from which to remove the tags. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Name of the tag. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The versionId of the object that the tag-set will be removed from. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { - s.Bucket = &v - return s -} - -func (s *DeleteObjectTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { - s.VersionId = &v - return s -} - -func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // The versionId of the object the tag-set was removed from. 
- VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s DeleteObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { - s.VersionId = &v - return s -} - -type DeleteObjectsInput struct { - _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` - - // The bucket name containing the objects to delete. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies whether you want to delete this object even if it has a Governance-type - // Object Lock in place. You must have sufficient permissions to perform this - // operation. - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - - // Container for the request. - // - // Delete is a required field - Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation -func (s DeleteObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
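DeleteObjectsInput batches the work: the keys travel in the request body inside the required Delete container as ObjectIdentifier entries, and the generated Validate method recurses into that container (see the Validate implementation that follows). A rough sketch with placeholder names; svc is an *s3.S3 client as before:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// deleteObjectsBatch removes several keys in a single request.
func deleteObjectsBatch(svc *s3.S3) {
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("tmp/a.txt")},
				{Key: aws.String("tmp/b.txt")},
			},
			Quiet: aws.Bool(true), // only failed keys are echoed back in the response
		},
	})
	if err != nil {
		log.Fatalf("delete objects: %v", err)
	}
	fmt.Printf("deleted %d objects, %d errors\n", len(out.Deleted), len(out.Errors))
}
```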
-func (s *DeleteObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Delete == nil { - invalidParams.Add(request.NewErrParamRequired("Delete")) - } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { - s.Bucket = &v - return s -} - -func (s *DeleteObjectsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { - s.BypassGovernanceRetention = &v - return s -} - -// SetDelete sets the Delete field's value. -func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { - s.Delete = v - return s -} - -// SetMFA sets the MFA field's value. -func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { - s.MFA = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { - s.RequestPayer = &v - return s -} - -func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteObjectsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeleteObjectsOutput struct { - _ struct{} `type:"structure"` - - // Container element for a successful delete. It identifies the object that - // was successfully deleted. - Deleted []*DeletedObject `type:"list" flattened:"true"` - - // Container for a failed delete operation that describes the object that Amazon - // S3 attempted to delete and the error it encountered. - Errors []*Error `locationName:"Error" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s DeleteObjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectsOutput) GoString() string { - return s.String() -} - -// SetDeleted sets the Deleted field's value. -func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { - s.Deleted = v - return s -} - -// SetErrors sets the Errors field's value. -func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { - s.Errors = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { - s.RequestCharged = &v - return s -} - -type DeletePublicAccessBlockInput struct { - _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` - - // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeletePublicAccessBlockInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletePublicAccessBlockInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { - s.Bucket = &v - return s -} - -func (s *DeletePublicAccessBlockInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type DeletePublicAccessBlockOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeletePublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletePublicAccessBlockOutput) GoString() string { - return s.String() -} - -// Information about the deleted object. -type DeletedObject struct { - _ struct{} `type:"structure"` - - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. In a simple DELETE, this header indicates - // whether (true) or not (false) a delete marker was created. - DeleteMarker *bool `type:"boolean"` - - // The version ID of the delete marker created as a result of the DELETE operation. - // If you delete a specific object version, the value returned by this header - // is the version ID of the object version deleted. - DeleteMarkerVersionId *string `type:"string"` - - // The name of the deleted object. - Key *string `min:"1" type:"string"` - - // The version ID of the deleted object. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s DeletedObject) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletedObject) GoString() string { - return s.String() -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { - s.DeleteMarker = &v - return s -} - -// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. -func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { - s.DeleteMarkerVersionId = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeletedObject) SetKey(v string) *DeletedObject { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *DeletedObject) SetVersionId(v string) *DeletedObject { - s.VersionId = &v - return s -} - -// Specifies information about where to publish analysis or configuration results -// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). -type Destination struct { - _ struct{} `type:"structure"` - - // Specify this only in a cross-account scenario (where source and destination - // bucket owners are not the same), and you want to change replica ownership - // to the AWS account that owns the destination bucket. If this is not specified - // in the replication configuration, the replicas are owned by same AWS account - // that owns the source object. - AccessControlTranslation *AccessControlTranslation `type:"structure"` - - // Destination bucket owner account ID. In a cross-account scenario, if you - // direct Amazon S3 to change replica ownership to the AWS account that owns - // the destination bucket by specifying the AccessControlTranslation property, - // this is the account ID of the destination bucket owner. For more information, - // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) - // in the Amazon Simple Storage Service Developer Guide. - Account *string `type:"string"` - - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to - // store the results. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // A container that provides information about encryption. If SourceSelectionCriteria - // is specified, you must specify this element. - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - - // A container specifying replication metrics-related settings enabling metrics - // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified - // together with a ReplicationTime block. - Metrics *Metrics `type:"structure"` - - // A container specifying S3 Replication Time Control (S3 RTC), including whether - // S3 RTC is enabled and the time when all objects and operations on objects - // must be replicated. Must be specified together with a Metrics block. - ReplicationTime *ReplicationTime `type:"structure"` - - // The storage class to use when replicating objects, such as standard or reduced - // redundancy. By default, Amazon S3 uses the storage class of the source object - // to create the object replica. - // - // For valid values, see the StorageClass element of the PUT Bucket replication - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon Simple Storage Service API Reference. - StorageClass *string `type:"string" enum:"StorageClass"` -} - -// String returns the string representation -func (s Destination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Destination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *Destination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Destination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.AccessControlTranslation != nil { - if err := s.AccessControlTranslation.Validate(); err != nil { - invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) - } - } - if s.Metrics != nil { - if err := s.Metrics.Validate(); err != nil { - invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) - } - } - if s.ReplicationTime != nil { - if err := s.ReplicationTime.Validate(); err != nil { - invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessControlTranslation sets the AccessControlTranslation field's value. -func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { - s.AccessControlTranslation = v - return s -} - -// SetAccount sets the Account field's value. -func (s *Destination) SetAccount(v string) *Destination { - s.Account = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *Destination) SetBucket(v string) *Destination { - s.Bucket = &v - return s -} - -func (s *Destination) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { - s.EncryptionConfiguration = v - return s -} - -// SetMetrics sets the Metrics field's value. -func (s *Destination) SetMetrics(v *Metrics) *Destination { - s.Metrics = v - return s -} - -// SetReplicationTime sets the ReplicationTime field's value. -func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { - s.ReplicationTime = v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Destination) SetStorageClass(v string) *Destination { - s.StorageClass = &v - return s -} - -// Contains the type of server-side encryption used. -type Encryption struct { - _ struct{} `type:"structure"` - - // The server-side encryption algorithm used when storing job results in Amazon - // S3 (for example, AES256, aws:kms). - // - // EncryptionType is a required field - EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` - - // If the encryption type is aws:kms, this optional value can be used to specify - // the encryption context for the restore results. - KMSContext *string `type:"string"` - - // If the encryption type is aws:kms, this optional value specifies the ID of - // the symmetric customer managed AWS KMS CMK to use for encryption of job results. - // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric - // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the AWS Key Management Service Developer Guide. - KMSKeyId *string `type:"string" sensitive:"true"` -} - -// String returns the string representation -func (s Encryption) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Encryption) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
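Destination and EncryptionConfiguration are the pieces of a replication rule that say where replicas land and how they are re-encrypted. A rough, hedged sketch of a cross-account destination (account ID, ARNs, and key ID are placeholders); Metrics and ReplicationTime are left out here because, per the field documentation, they would have to be specified together:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// replicationDestination builds the Destination block of a replication rule
// for a cross-account setup. All identifiers are placeholders.
func replicationDestination() *s3.Destination {
	return &s3.Destination{
		Bucket:  aws.String("arn:aws:s3:::example-destination-bucket"),
		Account: aws.String("111122223333"), // destination bucket owner
		// Hand replica ownership over to the destination account.
		AccessControlTranslation: &s3.AccessControlTranslation{
			Owner: aws.String(s3.OwnerOverrideDestination),
		},
		// Re-encrypt replicas with a KMS key in the destination account.
		EncryptionConfiguration: &s3.EncryptionConfiguration{
			ReplicaKmsKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"),
		},
		StorageClass: aws.String(s3.StorageClassStandardIa),
	}
}
```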
-func (s *Encryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Encryption"} - if s.EncryptionType == nil { - invalidParams.Add(request.NewErrParamRequired("EncryptionType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionType sets the EncryptionType field's value. -func (s *Encryption) SetEncryptionType(v string) *Encryption { - s.EncryptionType = &v - return s -} - -// SetKMSContext sets the KMSContext field's value. -func (s *Encryption) SetKMSContext(v string) *Encryption { - s.KMSContext = &v - return s -} - -// SetKMSKeyId sets the KMSKeyId field's value. -func (s *Encryption) SetKMSKeyId(v string) *Encryption { - s.KMSKeyId = &v - return s -} - -// Specifies encryption-related information for an Amazon S3 bucket that is -// a destination for replicated objects. -type EncryptionConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer - // master key (CMK) stored in AWS Key Management Service (KMS) for the destination - // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only - // supports symmetric customer managed CMKs. For more information, see Using - // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the AWS Key Management Service Developer Guide. - ReplicaKmsKeyID *string `type:"string"` -} - -// String returns the string representation -func (s EncryptionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EncryptionConfiguration) GoString() string { - return s.String() -} - -// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. -func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { - s.ReplicaKmsKeyID = &v - return s -} - -// A message that indicates the request is complete and no more messages will -// be sent. You should not assume that the request is complete until the client -// receives an EndEvent. -type EndEvent struct { - _ struct{} `locationName:"EndEvent" type:"structure"` -} - -// String returns the string representation -func (s EndEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s EndEvent) GoString() string { - return s.String() -} - -// The EndEvent is and event in the SelectObjectContentEventStream group of events. -func (s *EndEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value. -// This method is only used internally within the SDK's EventStream handling. -func (s *EndEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - return nil -} - -func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - return msg, err -} - -// Container for all error elements. -type Error struct { - _ struct{} `type:"structure"` - - // The error code is a string that uniquely identifies an error condition. It - // is meant to be read and understood by programs that detect and handle errors - // by type. 
- // - // Amazon S3 error codes - // - // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 - // Forbidden SOAP Fault Code Prefix: Client - // - // * Code: AccountProblem Description: There is a problem with your AWS account - // that prevents the operation from completing successfully. Contact AWS - // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault - // Code Prefix: Client - // - // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource - // has been disabled. Contact AWS Support for further assistance. HTTP Status - // Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: AmbiguousGrantByEmailAddress Description: The email address you - // provided is associated with more than one account. HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: AuthorizationHeaderMalformed Description: The authorization header - // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status - // Code: N/A - // - // * Code: BadDigest Description: The Content-MD5 you specified did not match - // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: BucketAlreadyExists Description: The requested bucket name is - // not available. The bucket namespace is shared by all users of the system. - // Please select a different name and try again. HTTP Status Code: 409 Conflict - // SOAP Fault Code Prefix: Client - // - // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create - // already exists, and you own it. Amazon S3 returns this error in all AWS - // Regions except in the North Virginia Region. For legacy compatibility, - // if you re-create an existing bucket that you already own in the North - // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access - // control lists (ACLs). Code: 409 Conflict (in all Regions except the North - // Virginia Region) SOAP Fault Code Prefix: Client - // - // * Code: BucketNotEmpty Description: The bucket you tried to delete is - // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client - // - // * Code: CredentialsNotSupported Description: This request does not support - // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: CrossLocationLoggingProhibited Description: Cross-location logging - // not allowed. Buckets in one geographic location cannot log information - // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP - // Fault Code Prefix: Client - // - // * Code: EntityTooSmall Description: Your proposed upload is smaller than - // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum - // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code - // Prefix: Client - // - // * Code: ExpiredToken Description: The provided token has expired. HTTP - // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: IllegalVersioningConfigurationException Description: Indicates - // that the versioning configuration specified in the request is invalid. 
- // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: IncompleteBody Description: You did not provide the number of - // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires - // exactly one file upload per request. HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum - // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: InternalError Description: We encountered an internal error. Please - // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code - // Prefix: Server - // - // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided - // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault - // Code Prefix: Client - // - // * Code: InvalidAddressingHeader Description: You must specify the Anonymous - // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client - // - // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: - // 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidBucketName Description: The specified bucket is not valid. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidBucketState Description: The request is not valid with - // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault - // Code Prefix: Client - // - // * Code: InvalidDigest Description: The Content-MD5 you specified is not - // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidEncryptionAlgorithmError Description: The encryption request - // you specified is not valid. The valid value is AES256. HTTP Status Code: - // 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidLocationConstraint Description: The specified location - // constraint is not valid. For more information about Regions, see How to - // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidObjectState Description: The operation is not valid for - // the current state of the object. HTTP Status Code: 403 Forbidden SOAP - // Fault Code Prefix: Client - // - // * Code: InvalidPart Description: One or more of the specified parts could - // not be found. The part might not have been uploaded, or the specified - // entity tag might not have matched the part's entity tag. HTTP Status Code: - // 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidPartOrder Description: The list of parts was not in ascending - // order. Parts list must be specified in order by part number. HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidPayer Description: All access to this object has been disabled. - // Please contact AWS Support for further assistance. HTTP Status Code: 403 - // Forbidden SOAP Fault Code Prefix: Client - // - // * Code: InvalidPolicyDocument Description: The content of the form does - // not meet the conditions specified in the policy document. 
HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidRange Description: The requested range cannot be satisfied. - // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code - // Prefix: Client - // - // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP - // Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: SOAP requests must be made over an - // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is - // not supported for buckets with non-DNS compliant names. HTTP Status Code: - // 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is - // not supported for buckets with periods (.) in their names. HTTP Status - // Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint - // only supports virtual style requests. HTTP Status Code: 400 Bad Request - // Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not - // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled - // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is - // not supported on this bucket. Contact AWS Support for more information. - // HTTP Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot - // be enabled on this bucket. Contact AWS Support for more information. HTTP - // Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidSecurity Description: The provided security credentials - // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: - // Client - // - // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidStorageClass Description: The storage class you specified - // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: InvalidTargetBucketForLogging Description: The target bucket for - // logging does not exist, is not owned by you, or does not have the appropriate - // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: InvalidToken Description: The provided token is malformed or otherwise - // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP - // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: KeyTooLongError Description: Your key is too long. HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MalformedACLError Description: The XML you provided was not well-formed - // or did not validate against our published schema. HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MalformedPOSTRequest Description: The body of your POST request - // is not well-formed multipart/form-data. 
HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: MalformedXML Description: This happens when the user sends malformed - // XML (XML that doesn't conform to the published XSD) for the configuration. - // The error message is, "The XML you provided was not well-formed or did - // not validate against our published schema." HTTP Status Code: 400 Bad - // Request SOAP Fault Code Prefix: Client - // - // * Code: MaxMessageLengthExceeded Description: Your request was too big. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MaxPostPreDataLengthExceededError Description: Your POST request - // fields preceding the upload file were too large. HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MetadataTooLarge Description: Your metadata headers exceed the - // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: MethodNotAllowed Description: The specified method is not allowed - // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault - // Code Prefix: Client - // - // * Code: MissingAttachment Description: A SOAP attachment was expected, - // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client - // - // * Code: MissingContentLength Description: You must provide the Content-Length - // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: - // Client - // - // * Code: MissingRequestBodyError Description: This happens when the user - // sends an empty XML document as a request. The error message is, "Request - // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing - // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code - // Prefix: Client - // - // * Code: MissingSecurityHeader Description: Your request is missing a required - // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: NoLoggingStatusForKey Description: There is no such thing as a - // logging status subresource for a key. HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: NoSuchBucket Description: The specified bucket does not exist. - // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client - // - // * Code: NoSuchBucketPolicy Description: The specified bucket does not - // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code - // Prefix: Client - // - // * Code: NoSuchKey Description: The specified key does not exist. HTTP - // Status Code: 404 Not Found SOAP Fault Code Prefix: Client - // - // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration - // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: - // Client - // - // * Code: NoSuchUpload Description: The specified multipart upload does - // not exist. The upload ID might be invalid, or the multipart upload might - // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault - // Code Prefix: Client - // - // * Code: NoSuchVersion Description: Indicates that the version ID specified - // in the request does not match an existing version. HTTP Status Code: 404 - // Not Found SOAP Fault Code Prefix: Client - // - // * Code: NotImplemented Description: A header you provided implies functionality - // that is not implemented. 
HTTP Status Code: 501 Not Implemented SOAP Fault - // Code Prefix: Server - // - // * Code: NotSignedUp Description: Your account is not signed up for the - // Amazon S3 service. You must sign up before you can use Amazon S3. You - // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status - // Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: OperationAborted Description: A conflicting conditional operation - // is currently in progress against this resource. Try again. HTTP Status - // Code: 409 Conflict SOAP Fault Code Prefix: Client - // - // * Code: PermanentRedirect Description: The bucket you are attempting to - // access must be addressed using the specified endpoint. Send all future - // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP - // Fault Code Prefix: Client - // - // * Code: PreconditionFailed Description: At least one of the preconditions - // you specified did not hold. HTTP Status Code: 412 Precondition Failed - // SOAP Fault Code Prefix: Client - // - // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 - // Moved Temporarily SOAP Fault Code Prefix: Client - // - // * Code: RestoreAlreadyInProgress Description: Object restore is already - // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client - // - // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be - // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: RequestTimeout Description: Your socket connection to the server - // was not read from or written to within the timeout period. HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: RequestTimeTooSkewed Description: The difference between the request - // time and the server's time is too large. HTTP Status Code: 403 Forbidden - // SOAP Fault Code Prefix: Client - // - // * Code: RequestTorrentOfBucketError Description: Requesting the torrent - // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: SignatureDoesNotMatch Description: The request signature we calculated - // does not match the signature you provided. Check your AWS secret access - // key and signing method. For more information, see REST Authentication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) - // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP - // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server - // - // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: - // 503 Slow Down SOAP Fault Code Prefix: Server - // - // * Code: TemporaryRedirect Description: You are being redirected to the - // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP - // Fault Code Prefix: Client - // - // * Code: TokenRefreshRequired Description: The provided token must be refreshed. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: TooManyBuckets Description: You have attempted to create more - // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code - // Prefix: Client - // - // * Code: UnexpectedContent Description: This request does not support content. 
- // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: UnresolvableGrantByEmailAddress Description: The email address - // you provided does not match any account on record. HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain - // the specified field name. If it is specified, check the order of the fields. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - Code *string `type:"string"` - - // The error key. - Key *string `min:"1" type:"string"` - - // The error message contains a generic description of the error condition in - // English. It is intended for a human audience. Simple programs display the - // message directly to the end user if they encounter an error condition they - // don't know how or don't care to handle. Sophisticated programs with more - // exhaustive error handling and proper internationalization are more likely - // to ignore the error message. - Message *string `type:"string"` - - // The version ID of the error. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s Error) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Error) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *Error) SetCode(v string) *Error { - s.Code = &v - return s -} - -// SetKey sets the Key field's value. -func (s *Error) SetKey(v string) *Error { - s.Key = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *Error) SetMessage(v string) *Error { - s.Message = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *Error) SetVersionId(v string) *Error { - s.VersionId = &v - return s -} - -// The error information. -type ErrorDocument struct { - _ struct{} `type:"structure"` - - // The object key name to use when a 4XX class error occurs. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s ErrorDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ErrorDocument) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ErrorDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *ErrorDocument) SetKey(v string) *ErrorDocument { - s.Key = &v - return s -} - -// Optional configuration to replicate existing source bucket objects. For more -// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) -// in the Amazon S3 Developer Guide. 
-type ExistingObjectReplication struct { - _ struct{} `type:"structure"` - - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` -} - -// String returns the string representation -func (s ExistingObjectReplication) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExistingObjectReplication) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ExistingObjectReplication) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { - s.Status = &v - return s -} - -// Specifies the Amazon S3 object key name to filter on and whether to filter -// on the suffix or prefix of the key name. -type FilterRule struct { - _ struct{} `type:"structure"` - - // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see Configuring - // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Name *string `type:"string" enum:"FilterRuleName"` - - // The value that the filter searches for in object key names. - Value *string `type:"string"` -} - -// String returns the string representation -func (s FilterRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FilterRule) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *FilterRule) SetName(v string) *FilterRule { - s.Name = &v - return s -} - -// SetValue sets the Value field's value. -func (s *FilterRule) SetValue(v string) *FilterRule { - s.Value = &v - return s -} - -type GetBucketAccelerateConfigurationInput struct { - _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` - - // Name of the bucket for which the accelerate configuration is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketAccelerateConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAccelerateConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` - - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` -} - -// String returns the string representation -func (s GetBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAccelerateConfigurationOutput) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { - s.Status = &v - return s -} - -type GetBucketAclInput struct { - _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` - - // Specifies the S3 bucket whose ACL is being requested. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { - s.Bucket = &v - return s -} - -func (s *GetBucketAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketAclOutput struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - // Container for the bucket owner's display name and ID. - Owner *Owner `type:"structure"` -} - -// String returns the string representation -func (s GetBucketAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAclOutput) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. 
-func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { - s.Owner = v - return s -} - -type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` - - // The name of the bucket from which an analytics configuration is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID that identifies the analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` - - // The configuration and any analyses for the analytics filter. - AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. -func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { - s.AnalyticsConfiguration = v - return s -} - -type GetBucketCorsInput struct { - _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` - - // The bucket name for which to get the cors configuration. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { - s.Bucket = &v - return s -} - -func (s *GetBucketCorsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketCorsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketCorsOutput struct { - _ struct{} `type:"structure"` - - // A set of origins and methods (cross-origin access that you want to allow). - // You can add up to 100 rules to the configuration. - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s GetBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketCorsOutput) GoString() string { - return s.String() -} - -// SetCORSRules sets the CORSRules field's value. -func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { - s.CORSRules = v - return s -} - -type GetBucketEncryptionInput struct { - _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` - - // The name of the bucket from which the server-side encryption configuration - // is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketEncryptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketEncryptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketEncryptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { - s.Bucket = &v - return s -} - -func (s *GetBucketEncryptionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketEncryptionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketEncryptionOutput struct { - _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - - // Specifies the default server-side-encryption configuration. - ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketEncryptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketEncryptionOutput) GoString() string { - return s.String() -} - -// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. -func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { - s.ServerSideEncryptionConfiguration = v - return s -} - -type GetBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` - - // The name of the bucket containing the inventory configuration to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. 
-func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` - - // Specifies the inventory configuration. - InventoryConfiguration *InventoryConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -// SetInventoryConfiguration sets the InventoryConfiguration field's value. -func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { - s.InventoryConfiguration = v - return s -} - -type GetBucketLifecycleConfigurationInput struct { - _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` - - // The name of the bucket for which to get the lifecycle information. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` - - // Container for a lifecycle rule. 
- Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationOutput) GoString() string { - return s.String() -} - -// SetRules sets the Rules field's value. -func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { - s.Rules = v - return s -} - -type GetBucketLifecycleInput struct { - _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` - - // The name of the bucket for which to get the lifecycle information. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { - s.Bucket = &v - return s -} - -func (s *GetBucketLifecycleInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketLifecycleInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketLifecycleOutput struct { - _ struct{} `type:"structure"` - - // Container for a lifecycle rule. - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s GetBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleOutput) GoString() string { - return s.String() -} - -// SetRules sets the Rules field's value. -func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { - s.Rules = v - return s -} - -type GetBucketLocationInput struct { - _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` - - // The name of the bucket for which to get the location. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLocationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLocationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetBucketLocationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketLocationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketLocationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketLocationOutput struct { - _ struct{} `type:"structure"` - - // Specifies the Region where the bucket resides. For a list of all the Amazon - // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` -} - -// String returns the string representation -func (s GetBucketLocationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLocationOutput) GoString() string { - return s.String() -} - -// SetLocationConstraint sets the LocationConstraint field's value. -func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { - s.LocationConstraint = &v - return s -} - -type GetBucketLoggingInput struct { - _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` - - // The bucket name for which to get the logging information. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLoggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { - s.Bucket = &v - return s -} - -func (s *GetBucketLoggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketLoggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketLoggingOutput struct { - _ struct{} `type:"structure"` - - // Describes where logs are stored and the prefix that Amazon S3 assigns to - // all log object keys for a bucket. For more information, see PUT Bucket logging - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon Simple Storage Service API Reference. - LoggingEnabled *LoggingEnabled `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLoggingOutput) GoString() string { - return s.String() -} - -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { - s.LoggingEnabled = v - return s -} - -type GetBucketMetricsConfigurationInput struct { - _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` - - // The name of the bucket containing the metrics configuration to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. 
-func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` - - // Specifies the metrics configuration. - MetricsConfiguration *MetricsConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -// SetMetricsConfiguration sets the MetricsConfiguration field's value. -func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { - s.MetricsConfiguration = v - return s -} - -type GetBucketNotificationConfigurationRequest struct { - _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` - - // Name of the bucket for which to get the notification configuration - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketNotificationConfigurationRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketNotificationConfigurationRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketNotificationConfigurationRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { - s.Bucket = &v - return s -} - -func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketPolicyInput struct { - _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` - - // The bucket name for which to get the bucket policy. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { - s.Bucket = &v - return s -} - -func (s *GetBucketPolicyInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketPolicyInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketPolicyOutput struct { - _ struct{} `type:"structure" payload:"Policy"` - - // The bucket policy as a JSON document. - Policy *string `type:"string"` -} - -// String returns the string representation -func (s GetBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyOutput) GoString() string { - return s.String() -} - -// SetPolicy sets the Policy field's value. -func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { - s.Policy = &v - return s -} - -type GetBucketPolicyStatusInput struct { - _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` - - // The name of the Amazon S3 bucket whose policy status you want to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketPolicyStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketPolicyStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { - s.Bucket = &v - return s -} - -func (s *GetBucketPolicyStatusInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketPolicyStatusOutput struct { - _ struct{} `type:"structure" payload:"PolicyStatus"` - - // The policy status for the specified bucket. - PolicyStatus *PolicyStatus `type:"structure"` -} - -// String returns the string representation -func (s GetBucketPolicyStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyStatusOutput) GoString() string { - return s.String() -} - -// SetPolicyStatus sets the PolicyStatus field's value. -func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { - s.PolicyStatus = v - return s -} - -type GetBucketReplicationInput struct { - _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` - - // The bucket name for which to get the replication information. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketReplicationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketReplicationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketReplicationOutput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - - // A container for replication rules. You can add up to 1,000 rules. The maximum - // size of a replication configuration is 2 MB. 
- ReplicationConfiguration *ReplicationConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketReplicationOutput) GoString() string { - return s.String() -} - -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. -func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { - s.ReplicationConfiguration = v - return s -} - -type GetBucketRequestPaymentInput struct { - _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` - - // The name of the bucket for which to get the payment request configuration - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketRequestPaymentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { - s.Bucket = &v - return s -} - -func (s *GetBucketRequestPaymentInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` - - // Specifies who pays for the download and request fees. - Payer *string `type:"string" enum:"Payer"` -} - -// String returns the string representation -func (s GetBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -// SetPayer sets the Payer field's value. -func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { - s.Payer = &v - return s -} - -type GetBucketTaggingInput struct { - _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` - - // The name of the bucket for which to get the tagging information. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { - s.Bucket = &v - return s -} - -func (s *GetBucketTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketTaggingOutput struct { - _ struct{} `type:"structure"` - - // Contains the tag set. - // - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` -} - -// String returns the string representation -func (s GetBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketTaggingOutput) GoString() string { - return s.String() -} - -// SetTagSet sets the TagSet field's value. -func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { - s.TagSet = v - return s -} - -type GetBucketVersioningInput struct { - _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` - - // The name of the bucket for which to get the versioning information. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketVersioningInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { - s.Bucket = &v - return s -} - -func (s *GetBucketVersioningInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketVersioningInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketVersioningOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation -func (s GetBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketVersioningOutput) GoString() string { - return s.String() -} - -// SetMFADelete sets the MFADelete field's value. -func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { - s.MFADelete = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { - s.Status = &v - return s -} - -type GetBucketWebsiteInput struct { - _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` - - // The bucket name for which to get the website configuration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { - s.Bucket = &v - return s -} - -func (s *GetBucketWebsiteInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketWebsiteInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetBucketWebsiteOutput struct { - _ struct{} `type:"structure"` - - // The name of the error document for the website. 
- ErrorDocument *ErrorDocument `type:"structure"` - - // The name of the index document for the website. - IndexDocument *IndexDocument `type:"structure"` - - // Specifies the redirect behavior of all requests to a website endpoint of - // an Amazon S3 bucket. - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation -func (s GetBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketWebsiteOutput) GoString() string { - return s.String() -} - -// SetErrorDocument sets the ErrorDocument field's value. -func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { - s.ErrorDocument = v - return s -} - -// SetIndexDocument sets the IndexDocument field's value. -func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { - s.IndexDocument = v - return s -} - -// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. -func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { - s.RedirectAllRequestsTo = v - return s -} - -// SetRoutingRules sets the RoutingRules field's value. -func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { - s.RoutingRules = v - return s -} - -type GetObjectAclInput struct { - _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` - - // The bucket name that contains the object for which to get the ACL information. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The key of the object for which to get the ACL information. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. 
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { - s.Bucket = &v - return s -} - -func (s *GetObjectAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { - s.VersionId = &v - return s -} - -func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetObjectAclOutput struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - // Container for the bucket owner's display name and ID. - Owner *Owner `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s GetObjectAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectAclOutput) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. -func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { - s.Owner = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { - s.RequestCharged = &v - return s -} - -type GetObjectInput struct { - _ struct{} `locationName:"GetObjectRequest" type:"structure"` - - // The bucket name containing the object. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` - - // Key of the object to get. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of the object being read. This is a positive integer between - // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. - // Useful for downloading just a part of an object. - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Sets the Cache-Control header of the response. - ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` - - // Sets the Content-Disposition header of the response - ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` - - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` - - // Sets the Content-Language header of the response. - ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` - - // Sets the Content-Type header of the response. 
- ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` - - // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { - s.Bucket = &v - return s -} - -func (s *GetObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetIfMatch sets the IfMatch field's value. -func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { - s.IfMatch = &v - return s -} - -// SetIfModifiedSince sets the IfModifiedSince field's value. -func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { - s.IfModifiedSince = &v - return s -} - -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { - s.IfNoneMatch = &v - return s -} - -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { - s.IfUnmodifiedSince = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectInput) SetKey(v string) *GetObjectInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. 
-func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { - s.PartNumber = &v - return s -} - -// SetRange sets the Range field's value. -func (s *GetObjectInput) SetRange(v string) *GetObjectInput { - s.Range = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { - s.RequestPayer = &v - return s -} - -// SetResponseCacheControl sets the ResponseCacheControl field's value. -func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { - s.ResponseCacheControl = &v - return s -} - -// SetResponseContentDisposition sets the ResponseContentDisposition field's value. -func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { - s.ResponseContentDisposition = &v - return s -} - -// SetResponseContentEncoding sets the ResponseContentEncoding field's value. -func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { - s.ResponseContentEncoding = &v - return s -} - -// SetResponseContentLanguage sets the ResponseContentLanguage field's value. -func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { - s.ResponseContentLanguage = &v - return s -} - -// SetResponseContentType sets the ResponseContentType field's value. -func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { - s.ResponseContentType = &v - return s -} - -// SetResponseExpires sets the ResponseExpires field's value. -func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { - s.ResponseExpires = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *GetObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { - s.VersionId = &v - return s -} - -func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetObjectLegalHoldInput struct { - _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - - // The bucket name containing the object whose Legal Hold status you want to - // retrieve. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The key name for the object whose Legal Hold status you want to retrieve. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID of the object whose Legal Hold status you want to retrieve. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectLegalHoldInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectLegalHoldInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectLegalHoldInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { - s.Bucket = &v - return s -} - -func (s *GetObjectLegalHoldInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { - s.VersionId = &v - return s -} - -func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetObjectLegalHoldOutput struct { - _ struct{} `type:"structure" payload:"LegalHold"` - - // The current Legal Hold status for the specified object. 
- LegalHold *ObjectLockLegalHold `type:"structure"` -} - -// String returns the string representation -func (s GetObjectLegalHoldOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectLegalHoldOutput) GoString() string { - return s.String() -} - -// SetLegalHold sets the LegalHold field's value. -func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { - s.LegalHold = v - return s -} - -type GetObjectLockConfigurationInput struct { - _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` - - // The bucket whose Object Lock configuration you want to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetObjectLockConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectLockConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectLockConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetObjectLockConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetObjectLockConfigurationOutput struct { - _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` - - // The specified bucket's Object Lock configuration. - ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetObjectLockConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectLockConfigurationOutput) GoString() string { - return s.String() -} - -// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. -func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { - s.ObjectLockConfiguration = v - return s -} - -type GetObjectOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - // Indicates that a range of bytes was specified. - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Object data. - Body io.ReadCloser `type:"blob"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. 
- ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The portion of the object returned in the response. - ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` - - // A map of metadata to store with the object in S3. - // - // By default unmarshaled keys are written as a map keys in following canonicalized format: - // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. - // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode currently in place for this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when this object's Object Lock will expire. 
- ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // The count of parts this object has. - PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` - - // Amazon S3 can return this if your request involves a bucket that is either - // a source or destination in a replication rule. - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Provides storage class information of the object. Amazon S3 returns this - // header for all objects except for Standard storage class objects. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The number of tags, if any, on the object. - TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s GetObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectOutput) GoString() string { - return s.String() -} - -// SetAcceptRanges sets the AcceptRanges field's value. -func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { - s.AcceptRanges = &v - return s -} - -// SetBody sets the Body field's value. 
-func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { - s.Body = v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentRange sets the ContentRange field's value. -func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { - s.ContentRange = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { - s.ContentType = &v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { - s.Expiration = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { - s.Expires = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { - s.LastModified = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { - s.Metadata = v - return s -} - -// SetMissingMeta sets the MissingMeta field's value. -func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { - s.MissingMeta = &v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetPartsCount sets the PartsCount field's value. -func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { - s.PartsCount = &v - return s -} - -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { - s.ReplicationStatus = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. 
-func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestore sets the Restore field's value. -func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { - s.Restore = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { - s.StorageClass = &v - return s -} - -// SetTagCount sets the TagCount field's value. -func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { - s.TagCount = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { - s.VersionId = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { - s.WebsiteRedirectLocation = &v - return s -} - -type GetObjectRetentionInput struct { - _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` - - // The bucket name containing the object whose retention settings you want to - // retrieve. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The key name for the object whose retention settings you want to retrieve. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID for the object whose retention settings you want to retrieve. 
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectRetentionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectRetentionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectRetentionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { - s.Bucket = &v - return s -} - -func (s *GetObjectRetentionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { - s.VersionId = &v - return s -} - -func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectRetentionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetObjectRetentionOutput struct { - _ struct{} `type:"structure" payload:"Retention"` - - // The container element for an object's retention settings. - Retention *ObjectLockRetention `type:"structure"` -} - -// String returns the string representation -func (s GetObjectRetentionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectRetentionOutput) GoString() string { - return s.String() -} - -// SetRetention sets the Retention field's value. -func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { - s.Retention = v - return s -} - -type GetObjectTaggingInput struct { - _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` - - // The bucket name containing the object for which to get the tagging information. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Object key for which to get the tagging information. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The versionId of the object for which to get the tagging information. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { - s.Bucket = &v - return s -} - -func (s *GetObjectTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { - s.VersionId = &v - return s -} - -func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // Contains the tag set. - // - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` - - // The versionId of the object for which you got the tagging information. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s GetObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetTagSet sets the TagSet field's value. -func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { - s.TagSet = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { - s.VersionId = &v - return s -} - -type GetObjectTorrentInput struct { - _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` - - // The name of the bucket containing the object for which to get the torrent - // files. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The object key for which to get the information. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation -func (s GetObjectTorrentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTorrentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTorrentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { - s.Bucket = &v - return s -} - -func (s *GetObjectTorrentInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { - s.RequestPayer = &v - return s -} - -func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectTorrentInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetObjectTorrentOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - // A Bencoded dictionary as defined by the BitTorrent specification - Body io.ReadCloser `type:"blob"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s GetObjectTorrentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTorrentOutput) GoString() string { - return s.String() -} - -// SetBody sets the Body field's value. -func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { - s.Body = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. 
-func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { - s.RequestCharged = &v - return s -} - -type GetPublicAccessBlockInput struct { - _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you - // want to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetPublicAccessBlockInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPublicAccessBlockInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { - s.Bucket = &v - return s -} - -func (s *GetPublicAccessBlockInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type GetPublicAccessBlockOutput struct { - _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` - - // The PublicAccessBlock configuration currently in effect for this Amazon S3 - // bucket. - PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` -} - -// String returns the string representation -func (s GetPublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPublicAccessBlockOutput) GoString() string { - return s.String() -} - -// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. -func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { - s.PublicAccessBlockConfiguration = v - return s -} - -// Container for Glacier job parameters. -type GlacierJobParameters struct { - _ struct{} `type:"structure"` - - // Glacier retrieval tier at which the restore will be processed. - // - // Tier is a required field - Tier *string `type:"string" required:"true" enum:"Tier"` -} - -// String returns the string representation -func (s GlacierJobParameters) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GlacierJobParameters) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GlacierJobParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} - if s.Tier == nil { - invalidParams.Add(request.NewErrParamRequired("Tier")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTier sets the Tier field's value. -func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { - s.Tier = &v - return s -} - -// Container for grant information. -type Grant struct { - _ struct{} `type:"structure"` - - // The person being granted permissions. - Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Specifies the permission given to the grantee. - Permission *string `type:"string" enum:"Permission"` -} - -// String returns the string representation -func (s Grant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Grant) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Grant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grant"} - if s.Grantee != nil { - if err := s.Grantee.Validate(); err != nil { - invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantee sets the Grantee field's value. -func (s *Grant) SetGrantee(v *Grantee) *Grant { - s.Grantee = v - return s -} - -// SetPermission sets the Permission field's value. -func (s *Grant) SetPermission(v string) *Grant { - s.Permission = &v - return s -} - -// Container for the person being granted permissions. -type Grantee struct { - _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Screen name of the grantee. - DisplayName *string `type:"string"` - - // Email address of the grantee. - EmailAddress *string `type:"string"` - - // The canonical user ID of the grantee. - ID *string `type:"string"` - - // Type of grantee - // - // Type is a required field - Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` - - // URI of the grantee group. - URI *string `type:"string"` -} - -// String returns the string representation -func (s Grantee) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Grantee) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Grantee) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grantee"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Grantee) SetDisplayName(v string) *Grantee { - s.DisplayName = &v - return s -} - -// SetEmailAddress sets the EmailAddress field's value. -func (s *Grantee) SetEmailAddress(v string) *Grantee { - s.EmailAddress = &v - return s -} - -// SetID sets the ID field's value. -func (s *Grantee) SetID(v string) *Grantee { - s.ID = &v - return s -} - -// SetType sets the Type field's value. -func (s *Grantee) SetType(v string) *Grantee { - s.Type = &v - return s -} - -// SetURI sets the URI field's value. 
-func (s *Grantee) SetURI(v string) *Grantee { - s.URI = &v - return s -} - -type HeadBucketInput struct { - _ struct{} `locationName:"HeadBucketRequest" type:"structure"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s HeadBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *HeadBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { - s.Bucket = &v - return s -} - -func (s *HeadBucketInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *HeadBucketInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type HeadBucketOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s HeadBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadBucketOutput) GoString() string { - return s.String() -} - -type HeadObjectInput struct { - _ struct{} `locationName:"HeadObjectRequest" type:"structure"` - - // The name of the bucket containing the object. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` - - // The object key. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of the object being read. This is a positive integer between - // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. - // Useful querying about the size of the part and the number of parts in this - // object. 
- PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s HeadObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *HeadObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { - s.Bucket = &v - return s -} - -func (s *HeadObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetIfMatch sets the IfMatch field's value. -func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { - s.IfMatch = &v - return s -} - -// SetIfModifiedSince sets the IfModifiedSince field's value. 
-func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { - s.IfModifiedSince = &v - return s -} - -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { - s.IfNoneMatch = &v - return s -} - -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { - s.IfUnmodifiedSince = &v - return s -} - -// SetKey sets the Key field's value. -func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { - s.PartNumber = &v - return s -} - -// SetRange sets the Range field's value. -func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { - s.Range = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *HeadObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { - s.VersionId = &v - return s -} - -func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *HeadObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type HeadObjectOutput struct { - _ struct{} `type:"structure"` - - // Indicates that a range of bytes was specified. - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. 
If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` - - // A map of metadata to store with the object in S3. - // - // By default unmarshaled keys are written as a map keys in following canonicalized format: - // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. - // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - // Specifies whether a legal hold is in effect for this object. This header - // is only returned if the requester has the s3:GetObjectLegalHold permission. - // This header is not returned if the specified version of this object has never - // had a legal hold applied. For more information about S3 Object Lock, see - // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode, if any, that's in effect for this object. This header - // is only returned if the requester has the s3:GetObjectRetention permission. - // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when the Object Lock retention period expires. This header - // is only returned if the requester has the s3:GetObjectRetention permission. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // The count of parts this object has. - PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` - - // Amazon S3 can return this header if your request involves a bucket that is - // either a source or destination in a replication rule. - // - // In replication, you have a source bucket on which you configure replication - // and destination bucket where Amazon S3 stores object replicas. 
When you request - // an object (GetObject) or object metadata (HeadObject) from these buckets, - // Amazon S3 will return the x-amz-replication-status header in the response - // as follows: - // - // * If requesting an object from the source bucket — Amazon S3 will return - // the x-amz-replication-status header if the object in your request is eligible - // for replication. For example, suppose that in your replication configuration, - // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects - // with key prefix TaxDocs. Any objects you upload with this key name prefix, - // for example TaxDocs/document1.pdf, are eligible for replication. For any - // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status - // header with value PENDING, COMPLETED or FAILED indicating object replication - // status. - // - // * If requesting an object from the destination bucket — Amazon S3 will - // return the x-amz-replication-status header with value REPLICA if the object - // in your request is a replica that Amazon S3 created. - // - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If the object is an archived object (an object whose storage class is GLACIER), - // the response includes this header if either the archive restoration is in - // progress (see RestoreObject or an archive copy is already restored. - // - // If an archive copy is already restored, the header value indicates when Amazon - // S3 is scheduled to delete the object copy. For example: - // - // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 - // GMT" - // - // If the object restoration is in progress, the header returns the value ongoing-request="true". - // - // For more information about archiving objects, see Transitioning Objects: - // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. 
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // If the object is stored using server-side encryption either with an AWS KMS - // customer master key (CMK) or an Amazon S3-managed encryption key, the response - // includes this header with the value of the server-side encryption algorithm - // used when storing this object in Amazon S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Provides storage class information of the object. Amazon S3 returns this - // header for all objects except for Standard storage class objects. - // - // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s HeadObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadObjectOutput) GoString() string { - return s.String() -} - -// SetAcceptRanges sets the AcceptRanges field's value. -func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { - s.AcceptRanges = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { - s.ContentType = &v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { - s.Expiration = &v - return s -} - -// SetExpires sets the Expires field's value. 
-func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { - s.Expires = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { - s.LastModified = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { - s.Metadata = v - return s -} - -// SetMissingMeta sets the MissingMeta field's value. -func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { - s.MissingMeta = &v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetPartsCount sets the PartsCount field's value. -func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { - s.PartsCount = &v - return s -} - -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { - s.ReplicationStatus = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestore sets the Restore field's value. -func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { - s.Restore = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { - s.StorageClass = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { - s.VersionId = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { - s.WebsiteRedirectLocation = &v - return s -} - -// Container for the Suffix element. 
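A similarly hedged sketch for the HeadObjectInput/HeadObjectOutput pair defined above: issue a HeadObject call and read a few of the optional response headers. The bucket and key are placeholders, and the `HeadObject` method and `aws.*Value` helpers are assumed from the rest of the SDK.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket and Key are placeholders; Validate above requires both.
	input := &s3.HeadObjectInput{}
	input.SetBucket("example-bucket").SetKey("reports/2020/q1.csv")

	out, err := svc.HeadObject(input)
	if err != nil {
		fmt.Println("HeadObject failed:", err)
		return
	}

	// Every output field is a pointer because each response header is optional;
	// the aws.*Value helpers return zero values for headers that were not sent.
	fmt.Println("size:", aws.Int64Value(out.ContentLength))
	fmt.Println("etag:", aws.StringValue(out.ETag))
	fmt.Println("last modified:", aws.TimeValue(out.LastModified))
	fmt.Println("storage class:", aws.StringValue(out.StorageClass))
}
```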
-type IndexDocument struct { - _ struct{} `type:"structure"` - - // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request - // to samplebucket/images/ the data that is returned will be for the object - // with the key name images/index.html) The suffix must not be empty and must - // not include a slash character. - // - // Suffix is a required field - Suffix *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s IndexDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IndexDocument) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *IndexDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} - if s.Suffix == nil { - invalidParams.Add(request.NewErrParamRequired("Suffix")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSuffix sets the Suffix field's value. -func (s *IndexDocument) SetSuffix(v string) *IndexDocument { - s.Suffix = &v - return s -} - -// Container element that identifies who initiated the multipart upload. -type Initiator struct { - _ struct{} `type:"structure"` - - // Name of the Principal. - DisplayName *string `type:"string"` - - // If the principal is an AWS account, it provides the Canonical User ID. If - // the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` -} - -// String returns the string representation -func (s Initiator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Initiator) GoString() string { - return s.String() -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Initiator) SetDisplayName(v string) *Initiator { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Initiator) SetID(v string) *Initiator { - s.ID = &v - return s -} - -// Describes the serialization format of the object. -type InputSerialization struct { - _ struct{} `type:"structure"` - - // Describes the serialization of a CSV-encoded object. - CSV *CSVInput `type:"structure"` - - // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default - // Value: NONE. - CompressionType *string `type:"string" enum:"CompressionType"` - - // Specifies JSON as object's input serialization format. - JSON *JSONInput `type:"structure"` - - // Specifies Parquet as object's input serialization format. - Parquet *ParquetInput `type:"structure"` -} - -// String returns the string representation -func (s InputSerialization) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputSerialization) GoString() string { - return s.String() -} - -// SetCSV sets the CSV field's value. -func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { - s.CSV = v - return s -} - -// SetCompressionType sets the CompressionType field's value. -func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { - s.CompressionType = &v - return s -} - -// SetJSON sets the JSON field's value. -func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { - s.JSON = v - return s -} - -// SetParquet sets the Parquet field's value. 
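For InputSerialization, a rough illustration of describing a gzip-compressed JSON Lines object. The string values "GZIP" and "LINES" are assumed to be the wire values of the CompressionType and JSONType enums referenced above; in practice this struct is embedded in a SelectObjectContent request defined elsewhere in this file.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Describe the stored object: gzip-compressed, one JSON record per line.
	in := &s3.InputSerialization{}
	in.SetCompressionType("GZIP") // assumed CompressionType wire value
	in.SetJSON((&s3.JSONInput{}).SetType("LINES")) // assumed JSONType wire value

	fmt.Println(in) // String() pretty-prints via awsutil.Prettify
}
```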
-func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { - s.Parquet = v - return s -} - -// Specifies the inventory configuration for an Amazon S3 bucket. For more information, -// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon Simple Storage Service API Reference. -type InventoryConfiguration struct { - _ struct{} `type:"structure"` - - // Contains information about where to publish the inventory results. - // - // Destination is a required field - Destination *InventoryDestination `type:"structure" required:"true"` - - // Specifies an inventory filter. The inventory only includes objects that meet - // the filter's criteria. - Filter *InventoryFilter `type:"structure"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // Object versions to include in the inventory list. If set to All, the list - // includes all the object versions, which adds the version-related fields VersionId, - // IsLatest, and DeleteMarker to the list. If set to Current, the list does - // not contain these version-related fields. - // - // IncludedObjectVersions is a required field - IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - - // Specifies whether the inventory is enabled or disabled. If set to True, an - // inventory list is generated. If set to False, no inventory list is generated. - // - // IsEnabled is a required field - IsEnabled *bool `type:"boolean" required:"true"` - - // Contains the optional fields that are included in the inventory results. - OptionalFields []*string `locationNameList:"Field" type:"list"` - - // Specifies the schedule for generating inventory results. - // - // Schedule is a required field - Schedule *InventorySchedule `type:"structure" required:"true"` -} - -// String returns the string representation -func (s InventoryConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.IncludedObjectVersions == nil { - invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) - } - if s.IsEnabled == nil { - invalidParams.Add(request.NewErrParamRequired("IsEnabled")) - } - if s.Schedule == nil { - invalidParams.Add(request.NewErrParamRequired("Schedule")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - if s.Schedule != nil { - if err := s.Schedule.Validate(); err != nil { - invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDestination sets the Destination field's value. 
-func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { - s.Destination = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { - s.Id = &v - return s -} - -// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. -func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { - s.IncludedObjectVersions = &v - return s -} - -// SetIsEnabled sets the IsEnabled field's value. -func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { - s.IsEnabled = &v - return s -} - -// SetOptionalFields sets the OptionalFields field's value. -func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { - s.OptionalFields = v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { - s.Schedule = v - return s -} - -// Specifies the inventory configuration for an Amazon S3 bucket. -type InventoryDestination struct { - _ struct{} `type:"structure"` - - // Contains the bucket name, file format, bucket owner (optional), and prefix - // (optional) where inventory results are published. - // - // S3BucketDestination is a required field - S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` -} - -// String returns the string representation -func (s InventoryDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} - if s.S3BucketDestination == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) - } - if s.S3BucketDestination != nil { - if err := s.S3BucketDestination.Validate(); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3BucketDestination sets the S3BucketDestination field's value. -func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { - s.S3BucketDestination = v - return s -} - -// Contains the type of server-side encryption used to encrypt the inventory -// results. -type InventoryEncryption struct { - _ struct{} `type:"structure"` - - // Specifies the use of SSE-KMS to encrypt delivered inventory reports. - SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` - - // Specifies the use of SSE-S3 to encrypt delivered inventory reports. - SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` -} - -// String returns the string representation -func (s InventoryEncryption) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryEncryption) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *InventoryEncryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} - if s.SSEKMS != nil { - if err := s.SSEKMS.Validate(); err != nil { - invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSSEKMS sets the SSEKMS field's value. -func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { - s.SSEKMS = v - return s -} - -// SetSSES3 sets the SSES3 field's value. -func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { - s.SSES3 = v - return s -} - -// Specifies an inventory filter. The inventory only includes objects that meet -// the filter's criteria. -type InventoryFilter struct { - _ struct{} `type:"structure"` - - // The prefix that an object must have to be included in the inventory results. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s InventoryFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { - s.Prefix = &v - return s -} - -// Contains the bucket name, file format, bucket owner (optional), and prefix -// (optional) where inventory results are published. -type InventoryS3BucketDestination struct { - _ struct{} `type:"structure"` - - // The ID of the account that owns the destination bucket. - AccountId *string `type:"string"` - - // The Amazon Resource Name (ARN) of the bucket where inventory results will - // be published. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // Contains the type of server-side encryption used to encrypt the inventory - // results. - Encryption *InventoryEncryption `type:"structure"` - - // Specifies the output format of the inventory results. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"InventoryFormat"` - - // The prefix that is prepended to all inventory results. - Prefix *string `type:"string"` -} - -// String returns the string representation -func (s InventoryS3BucketDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventoryS3BucketDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *InventoryS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { - s.AccountId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { - s.Bucket = &v - return s -} - -func (s *InventoryS3BucketDestination) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetEncryption sets the Encryption field's value. -func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { - s.Encryption = v - return s -} - -// SetFormat sets the Format field's value. -func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { - s.Format = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { - s.Prefix = &v - return s -} - -// Specifies the schedule for generating inventory results. -type InventorySchedule struct { - _ struct{} `type:"structure"` - - // Specifies how frequently inventory results are produced. - // - // Frequency is a required field - Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` -} - -// String returns the string representation -func (s InventorySchedule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InventorySchedule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventorySchedule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} - if s.Frequency == nil { - invalidParams.Add(request.NewErrParamRequired("Frequency")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFrequency sets the Frequency field's value. -func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { - s.Frequency = &v - return s -} - -// Specifies JSON as object's input serialization format. -type JSONInput struct { - _ struct{} `type:"structure"` - - // The type of JSON. Valid values: Document, Lines. - Type *string `type:"string" enum:"JSONType"` -} - -// String returns the string representation -func (s JSONInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JSONInput) GoString() string { - return s.String() -} - -// SetType sets the Type field's value. -func (s *JSONInput) SetType(v string) *JSONInput { - s.Type = &v - return s -} - -// Specifies JSON as request's output serialization format. -type JSONOutput struct { - _ struct{} `type:"structure"` - - // The value used to separate individual records in the output. 
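Pulling the inventory types together, a hedged sketch of assembling an InventoryConfiguration and running its Validate checks. The destination bucket ARN, configuration ID, and the strings "CSV", "All", and "Daily" are placeholders/assumed enum wire values; the configuration would normally be sent with the bucket inventory PUT operation defined elsewhere in this file.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Destination: which bucket receives the inventory reports and in what format.
	dest := &s3.InventoryDestination{}
	dest.SetS3BucketDestination((&s3.InventoryS3BucketDestination{}).
		SetBucket("arn:aws:s3:::example-inventory-bucket"). // placeholder ARN
		SetFormat("CSV").                                   // assumed InventoryFormat value
		SetPrefix("inventory/"))

	cfg := &s3.InventoryConfiguration{}
	cfg.SetId("daily-all-versions").
		SetIsEnabled(true).
		SetIncludedObjectVersions("All"). // assumed InventoryIncludedObjectVersions value
		SetDestination(dest).
		SetSchedule((&s3.InventorySchedule{}).SetFrequency("Daily")) // assumed InventoryFrequency value

	// Validate mirrors the required-field checks shown above.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid configuration:", err)
		return
	}
	fmt.Println("configuration is structurally valid")
}
```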
- RecordDelimiter *string `type:"string"` -} - -// String returns the string representation -func (s JSONOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s JSONOutput) GoString() string { - return s.String() -} - -// SetRecordDelimiter sets the RecordDelimiter field's value. -func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { - s.RecordDelimiter = &v - return s -} - -// A container for object key name prefix and suffix filtering rules. -type KeyFilter struct { - _ struct{} `type:"structure"` - - // A list of containers for the key-value pair that defines the criteria for - // the filter rule. - FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s KeyFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s KeyFilter) GoString() string { - return s.String() -} - -// SetFilterRules sets the FilterRules field's value. -func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { - s.FilterRules = v - return s -} - -// A container for specifying the configuration for AWS Lambda notifications. -type LambdaFunctionConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For - // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Filter *NotificationConfigurationFilter `type:"structure"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 - // invokes when the specified event type occurs. - // - // LambdaFunctionArn is a required field - LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` -} - -// String returns the string representation -func (s LambdaFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaFunctionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.LambdaFunctionArn == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEvents sets the Events field's value. -func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { - s.Events = v - return s -} - -// SetFilter sets the Filter field's value. 
-func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { - s.Id = &v - return s -} - -// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. -func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { - s.LambdaFunctionArn = &v - return s -} - -// Container for lifecycle rules. You can add as many as 1000 rules. -type LifecycleConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies lifecycle configuration rules for an Amazon S3 bucket. - // - // Rules is a required field - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s LifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { - s.Rules = v - return s -} - -// Container for the expiration for the lifecycle of the object. -type LifecycleExpiration struct { - _ struct{} `type:"structure"` - - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false - // the policy takes no action. This cannot be specified with Days or Date in - // a Lifecycle Expiration Policy. - ExpiredObjectDeleteMarker *bool `type:"boolean"` -} - -// String returns the string representation -func (s LifecycleExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleExpiration) GoString() string { - return s.String() -} - -// SetDate sets the Date field's value. -func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { - s.Date = &v - return s -} - -// SetDays sets the Days field's value. -func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { - s.Days = &v - return s -} - -// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. -func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { - s.ExpiredObjectDeleteMarker = &v - return s -} - -// A lifecycle rule for individual objects in an Amazon S3 bucket. 
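As a small illustration of the LambdaFunctionConfiguration contract described above (Events and LambdaFunctionArn required, Id optional), the following sketch builds and validates one. The event name and function ARN are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	cfg := &s3.LambdaFunctionConfiguration{}
	cfg.SetEvents([]*string{aws.String("s3:ObjectCreated:*")})                               // placeholder event type
	cfg.SetLambdaFunctionArn("arn:aws:lambda:us-east-1:111122223333:function:on-upload")     // placeholder ARN

	// Events and LambdaFunctionArn are the two required fields per Validate above;
	// Id is optional and Amazon S3 assigns one if it is omitted.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid notification configuration:", err)
		return
	}
	fmt.Println(cfg)
}
```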
-type LifecycleRule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an incomplete multipart upload - // that Amazon S3 will wait before permanently removing all parts of the upload. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon Simple Storage Service Developer Guide. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - // Specifies the expiration for the lifecycle of the object in the form of date, - // days and, whether the object has a delete marker. - Expiration *LifecycleExpiration `type:"structure"` - - // The Filter is used to identify objects that a Lifecycle Rule applies to. - // A Filter must have exactly one of Prefix, Tag, or And specified. - Filter *LifecycleRuleFilter `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - // Specifies the transition rule for the lifecycle rule that describes when - // noncurrent objects transition to a specific storage class. If your bucket - // is versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to a specific - // storage class at a set period in the object's lifetime. - NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` - - // Prefix identifying one or more objects to which the rule applies. This is - // No longer used; use Filter instead. - // - // Deprecated: Prefix has been deprecated - Prefix *string `deprecated:"true" type:"string"` - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - // Specifies when an Amazon S3 object transitions to a specified storage class. - Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s LifecycleRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. 
-func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { - s.AbortIncompleteMultipartUpload = v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { - s.Expiration = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { - s.Filter = v - return s -} - -// SetID sets the ID field's value. -func (s *LifecycleRule) SetID(v string) *LifecycleRule { - s.ID = &v - return s -} - -// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. -func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { - s.NoncurrentVersionExpiration = v - return s -} - -// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. -func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { - s.NoncurrentVersionTransitions = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { - s.Prefix = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { - s.Status = &v - return s -} - -// SetTransitions sets the Transitions field's value. -func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { - s.Transitions = v - return s -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or -// more predicates. The Lifecycle Rule will apply to any object matching all -// of the predicates configured inside the And operator. -type LifecycleRuleAndOperator struct { - _ struct{} `type:"structure"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string"` - - // All of these tags must exist in the object's tag set in order for the rule - // to apply. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s LifecycleRuleAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRuleAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRuleAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { - s.Tags = v - return s -} - -// The Filter is used to identify objects that a Lifecycle Rule applies to. -// A Filter must have exactly one of Prefix, Tag, or And specified. -type LifecycleRuleFilter struct { - _ struct{} `type:"structure"` - - // This is used in a Lifecycle Rule Filter to apply a logical AND to two or - // more predicates. 
The Lifecycle Rule will apply to any object matching all - // of the predicates configured inside the And operator. - And *LifecycleRuleAndOperator `type:"structure"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string"` - - // This tag must exist in the object's tag set in order for the rule to apply. - Tag *Tag `type:"structure"` -} - -// String returns the string representation -func (s LifecycleRuleFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRuleFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRuleFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { - s.Tag = v - return s -} - -type ListBucketAnalyticsConfigurationsInput struct { - _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` - - // The name of the bucket from which analytics configurations are retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation -func (s ListBucketAnalyticsConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketAnalyticsConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { - s.Bucket = &v - return s -} - -func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. 
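A hedged sketch of the lifecycle types above: one LifecycleRule that expires objects under a prefix after 30 days. "Enabled" is assumed to be the ExpirationStatus wire value, and the rule would normally be wrapped in a bucket lifecycle configuration defined elsewhere in this file.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Expire objects under logs/ thirty days after creation. A filter must use
	// exactly one of Prefix, Tag, or And, as noted above.
	rule := &s3.LifecycleRule{}
	rule.SetID("expire-logs").
		SetStatus("Enabled"). // assumed ExpirationStatus value
		SetFilter((&s3.LifecycleRuleFilter{}).SetPrefix("logs/")).
		SetExpiration((&s3.LifecycleExpiration{}).SetDays(30))

	// Status is the only required field; Validate also recurses into the filter.
	if err := rule.Validate(); err != nil {
		fmt.Println("invalid rule:", err)
		return
	}
	fmt.Println(rule)
}
```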
-func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { - s.ContinuationToken = &v - return s -} - -func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListBucketAnalyticsConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // The list of analytics configurations for a bucket. - AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - - // The marker that is used as a starting point for this analytics configuration - // list response. This value is present if it was sent in the request. - ContinuationToken *string `type:"string"` - - // Indicates whether the returned list of analytics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // NextContinuationToken is sent when isTruncated is true, which indicates that - // there are more analytics configurations to list. The next request must include - // this NextContinuationToken. The token is obfuscated and is not a usable value. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { - return s.String() -} - -// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { - s.AnalyticsConfigurationList = v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -type ListBucketInventoryConfigurationsInput struct { - _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` - - // The name of the bucket containing the inventory configurations to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The marker used to continue an inventory configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. 
- ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation -func (s ListBucketInventoryConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketInventoryConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketInventoryConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { - s.Bucket = &v - return s -} - -func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { - s.ContinuationToken = &v - return s -} - -func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListBucketInventoryConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // If sent in the request, the marker that is used as a starting point for this - // inventory configuration list response. - ContinuationToken *string `type:"string"` - - // The list of inventory configurations for a bucket. - InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - - // Tells whether the returned list of inventory configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // is provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation -func (s ListBucketInventoryConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketInventoryConfigurationsOutput) GoString() string { - return s.String() -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. 
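Because ListBucketInventoryConfigurations is paginated via ContinuationToken/NextContinuationToken as the comments above describe, a typical caller loops until IsTruncated is false. A minimal sketch, assuming the usual client constructor and a placeholder bucket name:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	var token *string
	for {
		input := &s3.ListBucketInventoryConfigurationsInput{}
		input.SetBucket("example-bucket") // placeholder name
		if token != nil {
			input.SetContinuationToken(*token)
		}

		page, err := svc.ListBucketInventoryConfigurations(input)
		if err != nil {
			fmt.Println("listing failed:", err)
			return
		}
		for _, cfg := range page.InventoryConfigurationList {
			fmt.Println("inventory configuration:", aws.StringValue(cfg.Id))
		}

		// IsTruncated signals that NextContinuationToken must be echoed back
		// to fetch the next page, exactly as the field comments above describe.
		if !aws.BoolValue(page.IsTruncated) {
			return
		}
		token = page.NextContinuationToken
	}
}
```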
-func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { - s.InventoryConfigurationList = v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -type ListBucketMetricsConfigurationsInput struct { - _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` - - // The name of the bucket containing the metrics configurations to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The marker that is used to continue a metrics configuration listing that - // has been truncated. Use the NextContinuationToken from a previously truncated - // list response to continue the listing. The continuation token is an opaque - // value that Amazon S3 understands. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation -func (s ListBucketMetricsConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketMetricsConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketMetricsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { - s.Bucket = &v - return s -} - -func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { - s.ContinuationToken = &v - return s -} - -func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListBucketMetricsConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // The marker that is used as a starting point for this metrics configuration - // list response. This value is present if it was sent in the request. - ContinuationToken *string `type:"string"` - - // Indicates whether the returned list of metrics configurations is complete. 
- // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // The list of metrics configurations for a bucket. - MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` - - // The marker used to continue a metrics configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation -func (s ListBucketMetricsConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketMetricsConfigurationsOutput) GoString() string { - return s.String() -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { - s.MetricsConfigurationList = v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -type ListBucketsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsInput) GoString() string { - return s.String() -} - -type ListBucketsOutput struct { - _ struct{} `type:"structure"` - - // The list of buckets owned by the requestor. - Buckets []*Bucket `locationNameList:"Bucket" type:"list"` - - // The owner of the buckets listed. - Owner *Owner `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsOutput) GoString() string { - return s.String() -} - -// SetBuckets sets the Buckets field's value. -func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { - s.Buckets = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { - s.Owner = v - return s -} - -type ListMultipartUploadsInput struct { - _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` - - // Name of the bucket to which the multipart upload was initiated. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Character you use to group keys. - // - // All keys that contain the same string between the prefix, if specified, and - // the first occurrence of the delimiter after the prefix are grouped under - // a single result element, CommonPrefixes. If you don't specify the prefix - // parameter, then the substring starts at the beginning of the key. The keys - // that are grouped under CommonPrefixes result element are not returned elsewhere - // in the response. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. - // - // If upload-id-marker is not specified, only the keys lexicographically greater - // than the specified key-marker will be included in the list. - // - // If upload-id-marker is specified, any multipart uploads for a key equal to - // the key-marker might also be included, provided those multipart uploads have - // upload IDs lexicographically greater than the specified upload-id-marker. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return - // in the response body. 1,000 is the maximum number of uploads that can be - // returned in a response. - MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different grouping - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker - // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker. 
- UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` -} - -// String returns the string representation -func (s ListMultipartUploadsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListMultipartUploadsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { - s.Bucket = &v - return s -} - -func (s *ListMultipartUploadsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { - s.EncodingType = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { - s.KeyMarker = &v - return s -} - -// SetMaxUploads sets the MaxUploads field's value. -func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { - s.MaxUploads = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { - s.Prefix = &v - return s -} - -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { - s.UploadIdMarker = &v - return s -} - -func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListMultipartUploadsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListMultipartUploadsOutput struct { - _ struct{} `type:"structure"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - // If you specify a delimiter in the request, then the result returns each distinct - // key prefix containing the delimiter in a CommonPrefixes element. The distinct - // key prefixes are returned in the Prefix child element. - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Contains the delimiter you specified in the request. If you don't specify - // a delimiter in your request, this element is absent from the response. - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - // - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: - // - // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. 
- EncodingType *string `type:"string" enum:"EncodingType"` - - // Indicates whether the returned list of multipart uploads is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of multipart uploads exceeds the limit allowed or specified - // by max uploads. - IsTruncated *bool `type:"boolean"` - - // The key at or after which the listing began. - KeyMarker *string `type:"string"` - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int64 `type:"integer"` - - // When a list is truncated, this element specifies the value that should be - // used for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // When a list is truncated, this element specifies the value that should be - // used for the upload-id-marker request parameter in a subsequent request. - NextUploadIdMarker *string `type:"string"` - - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - Prefix *string `type:"string"` - - // Upload ID after which listing began. - UploadIdMarker *string `type:"string"` - - // Container for elements related to a particular multipart upload. A response - // can contain zero or more Upload elements. - Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ListMultipartUploadsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsOutput) GoString() string { - return s.String() -} - -// SetBucket sets the Bucket field's value. -func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { - s.Bucket = &v - return s -} - -func (s *ListMultipartUploadsOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { - s.CommonPrefixes = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { - s.IsTruncated = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { - s.KeyMarker = &v - return s -} - -// SetMaxUploads sets the MaxUploads field's value. -func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { - s.MaxUploads = &v - return s -} - -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { - s.NextKeyMarker = &v - return s -} - -// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. 
-func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.NextUploadIdMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { - s.Prefix = &v - return s -} - -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.UploadIdMarker = &v - return s -} - -// SetUploads sets the Uploads field's value. -func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { - s.Uploads = v - return s -} - -type ListObjectVersionsInput struct { - _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` - - // The bucket name that contains the objects. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character that you specify to group keys. All keys that - // contain the same string between the prefix and the first occurrence of the - // delimiter are grouped under a single result element in CommonPrefixes. These - // groups are counted as one result against the max-keys limitation. These keys - // are not returned elsewhere in the response. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. If additional keys satisfy - // the search criteria, but were not returned because max-keys was exceeded, - // the response contains true. To return the additional - // keys, see key-marker and version-id-marker. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Use this parameter to select only those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different groupings - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) You can use prefix with delimiter to roll - // up numerous objects into a single result under CommonPrefixes. 
- Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Specifies the object version you want to start listing from. - VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` -} - -// String returns the string representation -func (s ListObjectVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { - s.Bucket = &v - return s -} - -func (s *ListObjectVersionsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { - s.EncodingType = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { - s.KeyMarker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { - s.Prefix = &v - return s -} - -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { - s.VersionIdMarker = &v - return s -} - -func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListObjectVersionsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListObjectVersionsOutput struct { - _ struct{} `type:"structure"` - - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Container for an object that is a delete marker. - DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` - - // The delimiter grouping the included keys. A delimiter is a character that - // you specify to group keys. All keys that contain the same string between - // the prefix and the first occurrence of the delimiter are grouped under a - // single result element in CommonPrefixes. These groups are counted as one - // result against the max-keys limitation. These keys are not returned elsewhere - // in the response. 
- Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object key names in the XML response. - // - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: - // - // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether Amazon S3 returned all of the results that - // satisfied the search criteria. If your results were truncated, you can make - // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the - // rest of the results. - IsTruncated *bool `type:"boolean"` - - // Marks the last key returned in a truncated response. - KeyMarker *string `type:"string"` - - // Specifies the maximum number of objects to return. - MaxKeys *int64 `type:"integer"` - - // Bucket name. - Name *string `type:"string"` - - // When the number of responses exceeds the value of MaxKeys, NextKeyMarker - // specifies the first key not returned that satisfies the search criteria. - // Use this value for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker - // specifies the first object version not returned that satisfies the search - // criteria. Use this value for the version-id-marker request parameter in a - // subsequent request. - NextVersionIdMarker *string `type:"string"` - - // Selects objects that start with the value supplied by this parameter. - Prefix *string `type:"string"` - - // Marks the last version of the key returned in a truncated response. - VersionIdMarker *string `type:"string"` - - // Container for version information. - Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ListObjectVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsOutput) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { - s.CommonPrefixes = v - return s -} - -// SetDeleteMarkers sets the DeleteMarkers field's value. -func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { - s.DeleteMarkers = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { - s.IsTruncated = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { - s.KeyMarker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. 
-func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { - s.Name = &v - return s -} - -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { - s.NextKeyMarker = &v - return s -} - -// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. -func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { - s.NextVersionIdMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { - s.Prefix = &v - return s -} - -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { - s.VersionIdMarker = &v - return s -} - -// SetVersions sets the Versions field's value. -func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { - s.Versions = v - return s -} - -type ListObjectsInput struct { - _ struct{} `locationName:"ListObjectsRequest" type:"structure"` - - // The name of the bucket containing the objects. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - Marker *string `location:"querystring" locationName:"marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // list objects request. Bucket owners need not specify this parameter in their - // requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation -func (s ListObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { - s.Bucket = &v - return s -} - -func (s *ListObjectsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { - s.EncodingType = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { - s.Marker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { - s.Prefix = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { - s.RequestPayer = &v - return s -} - -func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListObjectsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListObjectsOutput struct { - _ struct{} `type:"structure"` - - // All of the keys rolled up in a common prefix count as a single return when - // calculating the number of returns. - // - // A response can contain CommonPrefixes only if you specify a delimiter. - // - // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by the delimiter. - // - // CommonPrefixes lists keys that act like subdirectories in the directory specified - // by Prefix. - // - // For example, if the prefix is notes/ and the delimiter is a slash (/) as - // in notes/summer/july, the common prefix is notes/summer/. All of the keys - // that roll up into a common prefix count as a single return when calculating - // the number of returns. - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Metadata about each object returned. - Contents []*Object `type:"list" flattened:"true"` - - // Causes keys that contain the same string between the prefix and the first - // occurrence of the delimiter to be rolled up into a single result element - // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere - // in the response. Each rolled-up result counts as only one return against - // the MaxKeys value. - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether Amazon S3 returned all of the results that - // satisfied the search criteria. 
- IsTruncated *bool `type:"boolean"` - - // Indicates where in the bucket listing begins. Marker is included in the response - // if it was sent with the request. - Marker *string `type:"string"` - - // The maximum number of keys returned in the response body. - MaxKeys *int64 `type:"integer"` - - // Bucket name. - Name *string `type:"string"` - - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Amazon S3 lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. - NextMarker *string `type:"string"` - - // Keys that begin with the indicated prefix. - Prefix *string `type:"string"` -} - -// String returns the string representation -func (s ListObjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsOutput) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { - s.CommonPrefixes = v - return s -} - -// SetContents sets the Contents field's value. -func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { - s.Contents = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { - s.IsTruncated = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { - s.Marker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { - s.Name = &v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { - s.NextMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { - s.Prefix = &v - return s -} - -type ListObjectsV2Input struct { - _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - - // Bucket name to list. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // The owner field is not present in listV2 by default, if you want to return - // owner field with each key in the result then set the fetch owner field to - // true. - FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // list objects request in V2 style. Bucket owners need not specify this parameter - // in their requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. - StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` -} - -// String returns the string representation -func (s ListObjectsV2Input) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsV2Input) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectsV2Input) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { - s.Bucket = &v - return s -} - -func (s *ListObjectsV2Input) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { - s.ContinuationToken = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { - s.EncodingType = &v - return s -} - -// SetFetchOwner sets the FetchOwner field's value. 
-func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { - s.FetchOwner = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { - s.Prefix = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { - s.RequestPayer = &v - return s -} - -// SetStartAfter sets the StartAfter field's value. -func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { - s.StartAfter = &v - return s -} - -func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListObjectsV2Input) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListObjectsV2Output struct { - _ struct{} `type:"structure"` - - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. - // - // A response can contain CommonPrefixes only if you specify a delimiter. - // - // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by a delimiter. - // - // CommonPrefixes lists keys that act like subdirectories in the directory specified - // by Prefix. - // - // For example, if the prefix is notes/ and the delimiter is a slash (/) as - // in notes/summer/july, the common prefix is notes/summer/. All of the keys - // that roll up into a common prefix count as a single return when calculating - // the number of returns. - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Metadata about each object returned. - Contents []*Object `type:"list" flattened:"true"` - - // If ContinuationToken was sent with the request, it is included in the response. - ContinuationToken *string `type:"string"` - - // Causes keys that contain the same string between the prefix and the first - // occurrence of the delimiter to be rolled up into a single result element - // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere - // in the response. Each rolled-up result counts as only one return against - // the MaxKeys value. - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object key names in the XML response. - // - // If you specify the encoding-type request parameter, Amazon S3 includes this - // element in the response, and returns encoded key name values in the following - // response elements: - // - // Delimiter, Prefix, Key, and StartAfter. - EncodingType *string `type:"string" enum:"EncodingType"` - - // Set to false if all of the results were returned. Set to true if more keys - // are available to return. If the number of results exceeds that specified - // by MaxKeys, all of the results might not be returned. - IsTruncated *bool `type:"boolean"` - - // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than equals to MaxKeys field. Say you ask for 50 keys, your - // result will include less than equals 50 keys - KeyCount *int64 `type:"integer"` - - // Sets the maximum number of keys returned in the response. 
The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `type:"integer"` - - // Bucket name. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - Name *string `type:"string"` - - // NextContinuationToken is sent when isTruncated is true, which means there - // are more keys in the bucket that can be listed. The next list requests to - // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken - // is obfuscated and is not a real key - NextContinuationToken *string `type:"string"` - - // Keys that begin with the indicated prefix. - Prefix *string `type:"string"` - - // If StartAfter was sent with the request, it is included in the response. - StartAfter *string `type:"string"` -} - -// String returns the string representation -func (s ListObjectsV2Output) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsV2Output) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { - s.CommonPrefixes = v - return s -} - -// SetContents sets the Contents field's value. -func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { - s.Contents = v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { - s.ContinuationToken = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { - s.IsTruncated = &v - return s -} - -// SetKeyCount sets the KeyCount field's value. -func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { - s.KeyCount = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { - s.Name = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { - s.NextContinuationToken = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { - s.Prefix = &v - return s -} - -// SetStartAfter sets the StartAfter field's value. 
-func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { - s.StartAfter = &v - return s -} - -type ListPartsInput struct { - _ struct{} `locationName:"ListPartsRequest" type:"structure"` - - // Name of the bucket to which the parts are being uploaded. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Upload ID identifying the multipart upload whose parts are being listed. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s ListPartsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListPartsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { - s.Bucket = &v - return s -} - -func (s *ListPartsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. 
-func (s *ListPartsInput) SetKey(v string) *ListPartsInput { - s.Key = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { - s.MaxParts = &v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { - s.PartNumberMarker = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { - s.RequestPayer = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { - s.UploadId = &v - return s -} - -func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListPartsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type ListPartsOutput struct { - _ struct{} `type:"structure"` - - // If the bucket has a lifecycle rule configured with an action to abort incomplete - // multipart uploads and the prefix in the lifecycle rule matches the object - // name in the request, then the response includes this header indicating when - // the initiated multipart upload will become eligible for abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - // - // The response will also include the x-amz-abort-rule-id header that will provide - // the ID of the lifecycle configuration rule that defines this action. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - - // This header is returned along with the x-amz-abort-date header. It identifies - // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - // Container element that identifies who initiated the multipart upload. If - // the initiator is an AWS account, this element provides the same information - // as the Owner element. If the initiator is an IAM User, this element provides - // the user ARN and display name. - Initiator *Initiator `type:"structure"` - - // Indicates whether the returned list of parts is truncated. A true value indicates - // that the list was truncated. A list can be truncated if the number of parts - // exceeds the limit returned in the MaxParts element. - IsTruncated *bool `type:"boolean"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // Maximum number of parts that were allowed in the response. - MaxParts *int64 `type:"integer"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` - - // Container element that identifies the object owner, after the object is created. - // If multipart upload is initiated by an IAM user, this element provides the - // parent account ID and display name. 
- Owner *Owner `type:"structure"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - PartNumberMarker *int64 `type:"integer"` - - // Container for elements related to a particular part. A response can contain - // zero or more Part elements. - Parts []*Part `locationName:"Part" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded - // object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string `type:"string"` -} - -// String returns the string representation -func (s ListPartsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsOutput) GoString() string { - return s.String() -} - -// SetAbortDate sets the AbortDate field's value. -func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { - s.AbortDate = &v - return s -} - -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { - s.AbortRuleId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { - s.Bucket = &v - return s -} - -func (s *ListPartsOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetInitiator sets the Initiator field's value. -func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { - s.Initiator = v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { - s.IsTruncated = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { - s.Key = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { - s.MaxParts = &v - return s -} - -// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. -func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { - s.NextPartNumberMarker = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { - s.Owner = v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { - s.PartNumberMarker = &v - return s -} - -// SetParts sets the Parts field's value. -func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { - s.Parts = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { - s.RequestCharged = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { - s.StorageClass = &v - return s -} - -// SetUploadId sets the UploadId field's value. 
-func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { - s.UploadId = &v - return s -} - -// Describes an Amazon S3 location that will receive the results of the restore -// request. -type Location struct { - _ struct{} `type:"structure"` - - // A list of grants that control access to the staged results. - AccessControlList []*Grant `locationNameList:"Grant" type:"list"` - - // The name of the bucket where the restore results will be placed. - // - // BucketName is a required field - BucketName *string `type:"string" required:"true"` - - // The canned ACL to apply to the restore results. - CannedACL *string `type:"string" enum:"ObjectCannedACL"` - - // Contains the type of server-side encryption used. - Encryption *Encryption `type:"structure"` - - // The prefix that is prepended to the restore results for this request. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` - - // The class of storage used to store the restore results. - StorageClass *string `type:"string" enum:"StorageClass"` - - // The tag-set that is applied to the restore results. - Tagging *Tagging `type:"structure"` - - // A list of metadata to store with the restore results in S3. - UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` -} - -// String returns the string representation -func (s Location) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Location) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Location) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Location"} - if s.BucketName == nil { - invalidParams.Add(request.NewErrParamRequired("BucketName")) - } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.AccessControlList != nil { - for i, v := range s.AccessControlList { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessControlList sets the AccessControlList field's value. -func (s *Location) SetAccessControlList(v []*Grant) *Location { - s.AccessControlList = v - return s -} - -// SetBucketName sets the BucketName field's value. -func (s *Location) SetBucketName(v string) *Location { - s.BucketName = &v - return s -} - -// SetCannedACL sets the CannedACL field's value. -func (s *Location) SetCannedACL(v string) *Location { - s.CannedACL = &v - return s -} - -// SetEncryption sets the Encryption field's value. -func (s *Location) SetEncryption(v *Encryption) *Location { - s.Encryption = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *Location) SetPrefix(v string) *Location { - s.Prefix = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Location) SetStorageClass(v string) *Location { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. 
-func (s *Location) SetTagging(v *Tagging) *Location { - s.Tagging = v - return s -} - -// SetUserMetadata sets the UserMetadata field's value. -func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { - s.UserMetadata = v - return s -} - -// Describes where logs are stored and the prefix that Amazon S3 assigns to -// all log object keys for a bucket. For more information, see PUT Bucket logging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon Simple Storage Service API Reference. -type LoggingEnabled struct { - _ struct{} `type:"structure"` - - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case, you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - // - // TargetBucket is a required field - TargetBucket *string `type:"string" required:"true"` - - // Container for granting information. - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - - // A prefix for all log object keys. If you store log files from multiple Amazon - // S3 buckets in a single bucket, you can use a prefix to distinguish which - // log files came from which bucket. - // - // TargetPrefix is a required field - TargetPrefix *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s LoggingEnabled) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LoggingEnabled) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LoggingEnabled) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} - if s.TargetBucket == nil { - invalidParams.Add(request.NewErrParamRequired("TargetBucket")) - } - if s.TargetPrefix == nil { - invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) - } - if s.TargetGrants != nil { - for i, v := range s.TargetGrants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTargetBucket sets the TargetBucket field's value. -func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { - s.TargetBucket = &v - return s -} - -// SetTargetGrants sets the TargetGrants field's value. -func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { - s.TargetGrants = v - return s -} - -// SetTargetPrefix sets the TargetPrefix field's value. -func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { - s.TargetPrefix = &v - return s -} - -// A metadata key-value pair to store with an object. -type MetadataEntry struct { - _ struct{} `type:"structure"` - - // Name of the Object. - Name *string `type:"string"` - - // Value of the Object. - Value *string `type:"string"` -} - -// String returns the string representation -func (s MetadataEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetadataEntry) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. 
-func (s *MetadataEntry) SetName(v string) *MetadataEntry { - s.Name = &v - return s -} - -// SetValue sets the Value field's value. -func (s *MetadataEntry) SetValue(v string) *MetadataEntry { - s.Value = &v - return s -} - -// A container specifying replication metrics-related settings enabling metrics -// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified -// together with a ReplicationTime block. -type Metrics struct { - _ struct{} `type:"structure"` - - // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold - // event. - // - // EventThreshold is a required field - EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` - - // Specifies whether the replication metrics are enabled. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"MetricsStatus"` -} - -// String returns the string representation -func (s Metrics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Metrics) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Metrics) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Metrics"} - if s.EventThreshold == nil { - invalidParams.Add(request.NewErrParamRequired("EventThreshold")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEventThreshold sets the EventThreshold field's value. -func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { - s.EventThreshold = v - return s -} - -// SetStatus sets the Status field's value. -func (s *Metrics) SetStatus(v string) *Metrics { - s.Status = &v - return s -} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates, and an object -// must match all of the predicates in order for the filter to apply. -type MetricsAndOperator struct { - _ struct{} `type:"structure"` - - // The prefix used when evaluating an AND predicate. - Prefix *string `type:"string"` - - // The list of tags used when evaluating an AND predicate. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s MetricsAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricsAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. 
-func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { - s.Tags = v - return s -} - -// Specifies a metrics configuration for the CloudWatch request metrics (specified -// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating -// an existing metrics configuration, note that this is a full replacement of -// the existing metrics configuration. If you don't include the elements you -// want to keep, they are erased. For more information, see PUT Bucket metrics -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// in the Amazon Simple Storage Service API Reference. -type MetricsConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies a metrics configuration filter. The metrics configuration will - // only include objects that meet the filter's criteria. A filter must be a - // prefix, a tag, or a conjunction (MetricsAndOperator). - Filter *MetricsFilter `type:"structure"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s MetricsConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricsConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilter sets the Filter field's value. -func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { - s.Id = &v - return s -} - -// Specifies a metrics configuration filter. The metrics configuration only -// includes objects that meet the filter's criteria. A filter must be a prefix, -// a tag, or a conjunction (MetricsAndOperator). -type MetricsFilter struct { - _ struct{} `type:"structure"` - - // A conjunction (logical AND) of predicates, which is used in evaluating a - // metrics filter. The operator must have at least two predicates, and an object - // must match all of the predicates in order for the filter to apply. - And *MetricsAndOperator `type:"structure"` - - // The prefix used when evaluating a metrics filter. - Prefix *string `type:"string"` - - // The tag used when evaluating a metrics filter. - Tag *Tag `type:"structure"` -} - -// String returns the string representation -func (s MetricsFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricsFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *MetricsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { - s.Tag = v - return s -} - -// Container for the MultipartUpload for the Amazon S3 object. -type MultipartUpload struct { - _ struct{} `type:"structure"` - - // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Key of the object for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // Specifies the owner of the object that is part of the multipart upload. - Owner *Owner `type:"structure"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID that identifies the multipart upload. - UploadId *string `type:"string"` -} - -// String returns the string representation -func (s MultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MultipartUpload) GoString() string { - return s.String() -} - -// SetInitiated sets the Initiated field's value. -func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { - s.Initiated = &v - return s -} - -// SetInitiator sets the Initiator field's value. -func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { - s.Initiator = v - return s -} - -// SetKey sets the Key field's value. -func (s *MultipartUpload) SetKey(v string) *MultipartUpload { - s.Key = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { - s.Owner = v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { - s.StorageClass = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { - s.UploadId = &v - return s -} - -// Specifies when noncurrent object versions expire. Upon expiration, Amazon -// S3 permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) -// to request that Amazon S3 delete noncurrent object versions at a specific -// period in the object's lifetime. -type NoncurrentVersionExpiration struct { - _ struct{} `type:"structure"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. 
For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon Simple Storage Service Developer Guide. - NoncurrentDays *int64 `type:"integer"` -} - -// String returns the string representation -func (s NoncurrentVersionExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionExpiration) GoString() string { - return s.String() -} - -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { - s.NoncurrentDays = &v - return s -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, -// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning -// is suspended), you can set this action to request that Amazon S3 transition -// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, -// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's -// lifetime. -type NoncurrentVersionTransition struct { - _ struct{} `type:"structure"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon Simple Storage Service Developer Guide. - NoncurrentDays *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` -} - -// String returns the string representation -func (s NoncurrentVersionTransition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionTransition) GoString() string { - return s.String() -} - -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { - s.NoncurrentDays = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { - s.StorageClass = &v - return s -} - -// A container for specifying the notification configuration of the bucket. -// If this element is empty, notifications are turned off for the bucket. -type NotificationConfiguration struct { - _ struct{} `type:"structure"` - - // Describes the AWS Lambda functions to invoke and the events for which to - // invoke them. - LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` - - // The Amazon Simple Queue Service queues to publish messages to and the events - // for which to publish messages. - QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` - - // The topic to which notifications are sent and the events for which notifications - // are generated. 
- TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s NotificationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *NotificationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} - if s.LambdaFunctionConfigurations != nil { - for i, v := range s.LambdaFunctionConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.QueueConfigurations != nil { - for i, v := range s.QueueConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.TopicConfigurations != nil { - for i, v := range s.TopicConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. -func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { - s.LambdaFunctionConfigurations = v - return s -} - -// SetQueueConfigurations sets the QueueConfigurations field's value. -func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { - s.QueueConfigurations = v - return s -} - -// SetTopicConfigurations sets the TopicConfigurations field's value. -func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { - s.TopicConfigurations = v - return s -} - -type NotificationConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // Container for specifying the AWS Lambda notification configuration. - CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` - - // This data type is deprecated. This data type specifies the configuration - // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue - // when Amazon S3 detects specified events. - QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` - - // This data type is deprecated. A container for specifying the configuration - // for publication of messages to an Amazon Simple Notification Service (Amazon - // SNS) topic when Amazon S3 detects specified events. - TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` -} - -// String returns the string representation -func (s NotificationConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfigurationDeprecated) GoString() string { - return s.String() -} - -// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. 
-func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { - s.CloudFunctionConfiguration = v - return s -} - -// SetQueueConfiguration sets the QueueConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.QueueConfiguration = v - return s -} - -// SetTopicConfiguration sets the TopicConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.TopicConfiguration = v - return s -} - -// Specifies object key name filtering rules. For information about key name -// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. -type NotificationConfigurationFilter struct { - _ struct{} `type:"structure"` - - // A container for object key name prefix and suffix filtering rules. - Key *KeyFilter `locationName:"S3Key" type:"structure"` -} - -// String returns the string representation -func (s NotificationConfigurationFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfigurationFilter) GoString() string { - return s.String() -} - -// SetKey sets the Key field's value. -func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { - s.Key = v - return s -} - -// An object consists of data and its descriptive metadata. -type Object struct { - _ struct{} `type:"structure"` - - // The entity tag is an MD5 hash of the object. ETag reflects only changes to - // the contents of an object, not its metadata. - ETag *string `type:"string"` - - // The name that you assign to an object. You use the object key to retrieve - // the object. - Key *string `min:"1" type:"string"` - - // The date the Object was Last Modified - LastModified *time.Time `type:"timestamp"` - - // The owner of the object - Owner *Owner `type:"structure"` - - // Size in bytes of the object - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectStorageClass"` -} - -// String returns the string representation -func (s Object) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Object) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *Object) SetETag(v string) *Object { - s.ETag = &v - return s -} - -// SetKey sets the Key field's value. -func (s *Object) SetKey(v string) *Object { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *Object) SetLastModified(v time.Time) *Object { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *Object) SetOwner(v *Owner) *Object { - s.Owner = v - return s -} - -// SetSize sets the Size field's value. -func (s *Object) SetSize(v int64) *Object { - s.Size = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Object) SetStorageClass(v string) *Object { - s.StorageClass = &v - return s -} - -// Object Identifier is unique value to identify objects. -type ObjectIdentifier struct { - _ struct{} `type:"structure"` - - // Key name of the object to delete. 
- // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // VersionId for the specific version of the object to delete. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s ObjectIdentifier) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectIdentifier) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ObjectIdentifier) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { - s.VersionId = &v - return s -} - -// The container element for Object Lock configuration parameters. -type ObjectLockConfiguration struct { - _ struct{} `type:"structure"` - - // Indicates whether this bucket has an Object Lock configuration enabled. - ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - - // The Object Lock rule in place for the specified object. - Rule *ObjectLockRule `type:"structure"` -} - -// String returns the string representation -func (s ObjectLockConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectLockConfiguration) GoString() string { - return s.String() -} - -// SetObjectLockEnabled sets the ObjectLockEnabled field's value. -func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { - s.ObjectLockEnabled = &v - return s -} - -// SetRule sets the Rule field's value. -func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { - s.Rule = v - return s -} - -// A Legal Hold configuration for an object. -type ObjectLockLegalHold struct { - _ struct{} `type:"structure"` - - // Indicates whether the specified object has a Legal Hold in place. - Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` -} - -// String returns the string representation -func (s ObjectLockLegalHold) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectLockLegalHold) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { - s.Status = &v - return s -} - -// A Retention configuration for an object. -type ObjectLockRetention struct { - _ struct{} `type:"structure"` - - // Indicates the Retention mode for the specified object. - Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - - // The date on which this Object Lock Retention will expire. - RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` -} - -// String returns the string representation -func (s ObjectLockRetention) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectLockRetention) GoString() string { - return s.String() -} - -// SetMode sets the Mode field's value. 
-func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { - s.Mode = &v - return s -} - -// SetRetainUntilDate sets the RetainUntilDate field's value. -func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { - s.RetainUntilDate = &v - return s -} - -// The container element for an Object Lock rule. -type ObjectLockRule struct { - _ struct{} `type:"structure"` - - // The default retention period that you want to apply to new objects placed - // in the specified bucket. - DefaultRetention *DefaultRetention `type:"structure"` -} - -// String returns the string representation -func (s ObjectLockRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectLockRule) GoString() string { - return s.String() -} - -// SetDefaultRetention sets the DefaultRetention field's value. -func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { - s.DefaultRetention = v - return s -} - -// The version of an object. -type ObjectVersion struct { - _ struct{} `type:"structure"` - - // The entity tag is an MD5 hash of that version of the object. - ETag *string `type:"string"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp"` - - // Specifies the owner of the object. - Owner *Owner `type:"structure"` - - // Size in bytes of the object. - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` - - // Version ID of an object. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s ObjectVersion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectVersion) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *ObjectVersion) SetETag(v string) *ObjectVersion { - s.ETag = &v - return s -} - -// SetIsLatest sets the IsLatest field's value. -func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { - s.IsLatest = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ObjectVersion) SetKey(v string) *ObjectVersion { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { - s.Owner = v - return s -} - -// SetSize sets the Size field's value. -func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { - s.Size = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { - s.StorageClass = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { - s.VersionId = &v - return s -} - -// Describes the location where the restore job's output is stored. -type OutputLocation struct { - _ struct{} `type:"structure"` - - // Describes an S3 location that will receive the results of the restore request. 
- S3 *Location `type:"structure"` -} - -// String returns the string representation -func (s OutputLocation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputLocation) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *OutputLocation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} - if s.S3 != nil { - if err := s.S3.Validate(); err != nil { - invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3 sets the S3 field's value. -func (s *OutputLocation) SetS3(v *Location) *OutputLocation { - s.S3 = v - return s -} - -// Describes how results of the Select job are serialized. -type OutputSerialization struct { - _ struct{} `type:"structure"` - - // Describes the serialization of CSV-encoded Select results. - CSV *CSVOutput `type:"structure"` - - // Specifies JSON as request's output serialization format. - JSON *JSONOutput `type:"structure"` -} - -// String returns the string representation -func (s OutputSerialization) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputSerialization) GoString() string { - return s.String() -} - -// SetCSV sets the CSV field's value. -func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { - s.CSV = v - return s -} - -// SetJSON sets the JSON field's value. -func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { - s.JSON = v - return s -} - -// Container for the owner's display name and ID. -type Owner struct { - _ struct{} `type:"structure"` - - // Container for the display name of the owner. - DisplayName *string `type:"string"` - - // Container for the ID of the owner. - ID *string `type:"string"` -} - -// String returns the string representation -func (s Owner) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Owner) GoString() string { - return s.String() -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Owner) SetDisplayName(v string) *Owner { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Owner) SetID(v string) *Owner { - s.ID = &v - return s -} - -// Container for Parquet. -type ParquetInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ParquetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ParquetInput) GoString() string { - return s.String() -} - -// Container for elements related to a part. -type Part struct { - _ struct{} `type:"structure"` - - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp"` - - // Part number identifying the part. This is a positive integer between 1 and - // 10,000. - PartNumber *int64 `type:"integer"` - - // Size in bytes of the uploaded part data. - Size *int64 `type:"integer"` -} - -// String returns the string representation -func (s Part) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Part) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. 
-func (s *Part) SetETag(v string) *Part { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *Part) SetLastModified(v time.Time) *Part { - s.LastModified = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *Part) SetPartNumber(v int64) *Part { - s.PartNumber = &v - return s -} - -// SetSize sets the Size field's value. -func (s *Part) SetSize(v int64) *Part { - s.Size = &v - return s -} - -// The container element for a bucket's policy status. -type PolicyStatus struct { - _ struct{} `type:"structure"` - - // The policy status for this bucket. TRUE indicates that this bucket is public. - // FALSE indicates that the bucket is not public. - IsPublic *bool `locationName:"IsPublic" type:"boolean"` -} - -// String returns the string representation -func (s PolicyStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PolicyStatus) GoString() string { - return s.String() -} - -// SetIsPublic sets the IsPublic field's value. -func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { - s.IsPublic = &v - return s -} - -// This data type contains information about progress of an operation. -type Progress struct { - _ struct{} `type:"structure"` - - // The current number of uncompressed object bytes processed. - BytesProcessed *int64 `type:"long"` - - // The current number of bytes of records payload data returned. - BytesReturned *int64 `type:"long"` - - // The current number of object bytes scanned. - BytesScanned *int64 `type:"long"` -} - -// String returns the string representation -func (s Progress) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Progress) GoString() string { - return s.String() -} - -// SetBytesProcessed sets the BytesProcessed field's value. -func (s *Progress) SetBytesProcessed(v int64) *Progress { - s.BytesProcessed = &v - return s -} - -// SetBytesReturned sets the BytesReturned field's value. -func (s *Progress) SetBytesReturned(v int64) *Progress { - s.BytesReturned = &v - return s -} - -// SetBytesScanned sets the BytesScanned field's value. -func (s *Progress) SetBytesScanned(v int64) *Progress { - s.BytesScanned = &v - return s -} - -// This data type contains information about the progress event of an operation. -type ProgressEvent struct { - _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` - - // The Progress event details. - Details *Progress `locationName:"Details" type:"structure"` -} - -// String returns the string representation -func (s ProgressEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ProgressEvent) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent { - s.Details = v - return s -} - -// The ProgressEvent is and event in the SelectObjectContentEventStream group of events. -func (s *ProgressEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value. -// This method is only used internally within the SDK's EventStream handling. 
-func (s *ProgressEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - if err := payloadUnmarshaler.UnmarshalPayload( - bytes.NewReader(msg.Payload), s, - ); err != nil { - return err - } - return nil -} - -func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - var buf bytes.Buffer - if err = pm.MarshalPayload(&buf, s); err != nil { - return eventstream.Message{}, err - } - msg.Payload = buf.Bytes() - return msg, err -} - -// The PublicAccessBlock configuration that you want to apply to this Amazon -// S3 bucket. You can enable the configuration options in any combination. For -// more information about when Amazon S3 considers a bucket or object public, -// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon Simple Storage Service Developer Guide. -type PublicAccessBlockConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether Amazon S3 should block public access control lists (ACLs) - // for this bucket and objects in this bucket. Setting this element to TRUE - // causes the following behavior: - // - // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is - // public. - // - // * PUT Object calls fail if the request includes a public ACL. - // - // * PUT Bucket calls fail if the request includes a public ACL. - // - // Enabling this setting doesn't affect existing policies or ACLs. - BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` - - // Specifies whether Amazon S3 should block public bucket policies for this - // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to - // PUT Bucket policy if the specified bucket policy allows public access. - // - // Enabling this setting doesn't affect existing bucket policies. - BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` - - // Specifies whether Amazon S3 should ignore public ACLs for this bucket and - // objects in this bucket. Setting this element to TRUE causes Amazon S3 to - // ignore all public ACLs on this bucket and objects in this bucket. - // - // Enabling this setting doesn't affect the persistence of any existing ACLs - // and doesn't prevent new public ACLs from being set. - IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` - - // Specifies whether Amazon S3 should restrict public bucket policies for this - // bucket. Setting this element to TRUE restricts access to this bucket to only - // AWS services and authorized users within this account if the bucket has a - // public policy. - // - // Enabling this setting doesn't affect previously stored bucket policies, except - // that public and cross-account access within any public bucket policy, including - // non-public delegation to specific accounts, is blocked. - RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` -} - -// String returns the string representation -func (s PublicAccessBlockConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PublicAccessBlockConfiguration) GoString() string { - return s.String() -} - -// SetBlockPublicAcls sets the BlockPublicAcls field's value. 
-func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { - s.BlockPublicAcls = &v - return s -} - -// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. -func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { - s.BlockPublicPolicy = &v - return s -} - -// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. -func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { - s.IgnorePublicAcls = &v - return s -} - -// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. -func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { - s.RestrictPublicBuckets = &v - return s -} - -type PutBucketAccelerateConfigurationInput struct { - _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` - - // Container for setting the transfer acceleration state. - // - // AccelerateConfiguration is a required field - AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Name of the bucket for which the accelerate configuration is set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketAccelerateConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAccelerateConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} - if s.AccelerateConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) - } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. -func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { - s.AccelerateConfiguration = v - return s -} - -// SetBucket sets the Bucket field's value. 
-func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAccelerateConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketAclInput struct { - _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The bucket to which to apply the ACL. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` -} - -// String returns the string representation -func (s PutBucketAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. 
-func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { - s.ACL = &v - return s -} - -// SetAccessControlPolicy sets the AccessControlPolicy field's value. -func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { - s.AccessControlPolicy = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { - s.Bucket = &v - return s -} - -func (s *PutBucketAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { - s.GrantWriteACP = &v - return s -} - -func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketAclOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAclOutput) GoString() string { - return s.String() -} - -type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` - - // The configuration and any analyses for the analytics filter. - // - // AnalyticsConfiguration is a required field - AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The name of the bucket to which an analytics configuration is stored. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID that identifies the analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} - if s.AnalyticsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) - } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.AnalyticsConfiguration != nil { - if err := s.AnalyticsConfiguration.Validate(); err != nil { - invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { - s.AnalyticsConfiguration = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketCorsInput struct { - _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` - - // Specifies the bucket impacted by the corsconfiguration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Describes the cross-origin access configuration for objects in an Amazon - // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing - // (https://docs.aws.amazon.com/AmazonS3/latest/dev//cors.html) in the Amazon - // Simple Storage Service Developer Guide. - // - // CORSConfiguration is a required field - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.CORSConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) - } - if s.CORSConfiguration != nil { - if err := s.CORSConfiguration.Validate(); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { - s.Bucket = &v - return s -} - -func (s *PutBucketCorsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCORSConfiguration sets the CORSConfiguration field's value. -func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { - s.CORSConfiguration = v - return s -} - -func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketCorsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketCorsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCorsOutput) GoString() string { - return s.String() -} - -type PutBucketEncryptionInput struct { - _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` - - // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS - // (SSE-KMS). For information about the Amazon S3 default encryption feature, - // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies the default server-side-encryption configuration. - // - // ServerSideEncryptionConfiguration is a required field - ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketEncryptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketEncryptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketEncryptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.ServerSideEncryptionConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) - } - if s.ServerSideEncryptionConfiguration != nil { - if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { - s.Bucket = &v - return s -} - -func (s *PutBucketEncryptionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. -func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { - s.ServerSideEncryptionConfiguration = v - return s -} - -func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketEncryptionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketEncryptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketEncryptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketEncryptionOutput) GoString() string { - return s.String() -} - -type PutBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` - - // The name of the bucket where the inventory configuration will be stored. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the inventory configuration. - // - // InventoryConfiguration is a required field - InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.InventoryConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) - } - if s.InventoryConfiguration != nil { - if err := s.InventoryConfiguration.Validate(); err != nil { - invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. -func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -// SetInventoryConfiguration sets the InventoryConfiguration field's value. -func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { - s.InventoryConfiguration = v - return s -} - -func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketLifecycleConfigurationInput struct { - _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` - - // The name of the bucket for which to set the configuration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for lifecycle rules. You can add as many as 1,000 rules. - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. -func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { - s.LifecycleConfiguration = v - return s -} - -func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketLifecycleInput struct { - _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for lifecycle rules. You can add as many as 1000 rules. - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { - s.Bucket = &v - return s -} - -func (s *PutBucketLifecycleInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. -func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { - s.LifecycleConfiguration = v - return s -} - -func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketLifecycleInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleOutput) GoString() string { - return s.String() -} - -type PutBucketLoggingInput struct { - _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` - - // The name of the bucket for which to set the logging parameters. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for logging status information. - // - // BucketLoggingStatus is a required field - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.BucketLoggingStatus == nil { - invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) - } - if s.BucketLoggingStatus != nil { - if err := s.BucketLoggingStatus.Validate(); err != nil { - invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { - s.Bucket = &v - return s -} - -func (s *PutBucketLoggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. 
-func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { - s.BucketLoggingStatus = v - return s -} - -func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketLoggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketLoggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingOutput) GoString() string { - return s.String() -} - -type PutBucketMetricsConfigurationInput struct { - _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` - - // The name of the bucket for which the metrics configuration is set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the metrics configuration. - // - // MetricsConfiguration is a required field - MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.MetricsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) - } - if s.MetricsConfiguration != nil { - if err := s.MetricsConfiguration.Validate(); err != nil { - invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. -func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -// SetMetricsConfiguration sets the MetricsConfiguration field's value. 
-func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { - s.MetricsConfiguration = v - return s -} - -func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationConfigurationInput struct { - _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` - - // The name of the bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A container for specifying the notification configuration of the bucket. - // If this element is empty, notifications are turned off for the bucket. - // - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketNotificationConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - if s.NotificationConfiguration != nil { - if err := s.NotificationConfiguration.Validate(); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
-func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { - s.NotificationConfiguration = v - return s -} - -func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketNotificationConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationInput struct { - _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` - - // The name of the bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The container for the configuration. - // - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketNotificationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
-func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { - s.NotificationConfiguration = v - return s -} - -func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketNotificationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketNotificationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationOutput) GoString() string { - return s.String() -} - -type PutBucketPolicyInput struct { - _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` - - // The name of the bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. - ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - - // The bucket policy as a JSON document. - // - // Policy is a required field - Policy *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { - s.Bucket = &v - return s -} - -func (s *PutBucketPolicyInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. -func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { - s.ConfirmRemoveSelfBucketAccess = &v - return s -} - -// SetPolicy sets the Policy field's value. 
-func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { - s.Policy = &v - return s -} - -func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketPolicyInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyOutput) GoString() string { - return s.String() -} - -type PutBucketReplicationInput struct { - _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` - - // The name of the bucket - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A container for replication rules. You can add up to 1,000 rules. The maximum - // size of a replication configuration is 2 MB. - // - // ReplicationConfiguration is a required field - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` -} - -// String returns the string representation -func (s PutBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.ReplicationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) - } - if s.ReplicationConfiguration != nil { - if err := s.ReplicationConfiguration.Validate(); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketReplicationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. -func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { - s.ReplicationConfiguration = v - return s -} - -// SetToken sets the Token field's value. 
-func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { - s.Token = &v - return s -} - -func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketReplicationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketReplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationOutput) GoString() string { - return s.String() -} - -type PutBucketRequestPaymentInput struct { - _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for Payer. - // - // RequestPaymentConfiguration is a required field - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.RequestPaymentConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) - } - if s.RequestPaymentConfiguration != nil { - if err := s.RequestPaymentConfiguration.Validate(); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { - s.Bucket = &v - return s -} - -func (s *PutBucketRequestPaymentInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. 
-func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { - s.RequestPaymentConfiguration = v - return s -} - -func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -type PutBucketTaggingInput struct { - _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for the TagSet and Tag elements. - // - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { - s.Bucket = &v - return s -} - -func (s *PutBucketTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetTagging sets the Tagging field's value. 
-func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { - s.Tagging = v - return s -} - -func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingOutput) GoString() string { - return s.String() -} - -type PutBucketVersioningInput struct { - _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Container for setting the versioning state. - // - // VersioningConfiguration is a required field - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.VersioningConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { - s.Bucket = &v - return s -} - -func (s *PutBucketVersioningInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetMFA sets the MFA field's value. -func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { - s.MFA = &v - return s -} - -// SetVersioningConfiguration sets the VersioningConfiguration field's value. 
-func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { - s.VersioningConfiguration = v - return s -} - -func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketVersioningInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketVersioningOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningOutput) GoString() string { - return s.String() -} - -type PutBucketWebsiteInput struct { - _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for the request. - // - // WebsiteConfiguration is a required field - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.WebsiteConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) - } - if s.WebsiteConfiguration != nil { - if err := s.WebsiteConfiguration.Validate(); err != nil { - invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { - s.Bucket = &v - return s -} - -func (s *PutBucketWebsiteInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. 
-func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { - s.WebsiteConfiguration = v - return s -} - -func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketWebsiteInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteOutput) GoString() string { - return s.String() -} - -type PutObjectAclInput struct { - _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The bucket name that contains the object to which you want to attach the - // ACL. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Key for which the PUT operation was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { - s.ACL = &v - return s -} - -// SetAccessControlPolicy sets the AccessControlPolicy field's value. -func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { - s.AccessControlPolicy = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { - s.Bucket = &v - return s -} - -func (s *PutObjectAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { - s.VersionId = &v - return s -} - -func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutObjectAclOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectAclOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { - s.RequestCharged = &v - return s -} - -type PutObjectInput struct { - _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - - // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - // Bucket name to which the PUT operation was initiated. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Can be used to specify caching behavior along the request/reply chain. For - // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. 
- ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The base64-encoded 128-bit MD5 digest of the message (without the headers) - // according to RFC 1864. This header can be used as a message integrity check - // to verify that the data is the same data that was originally sent. Although - // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, - // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). - ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - - // A standard MIME type describing the format of the contents. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Object key for which the PUT operation was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Specifies whether a legal hold will be applied to this object. For more information - // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode that you want to apply to this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when you want this object's Object Lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // Confirms that the requester knows that they will be charged for the request. 
- // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS Encryption Context to use for object encryption. The - // value of this header is a base64-encoded UTF-8 string holding JSON with the - // encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetrical customer managed customer master key (CMK) that was used for - // the object. - // - // If the value of x-amz-server-side-encryption is aws:kms, this header specifies - // the ID of the symmetric customer managed AWS KMS CMK that will be used for - // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not - // providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS - // managed CMK in AWS to protect the data. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // If you don't specify, Standard is the default storage class. Amazon S3 supports - // other storage classes. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - // (For example, "Key1=Value1") - Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. 
For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). - // - // In the following example, the request header sets the redirect to an object - // (anotherPage.html) in the same bucket: - // - // x-amz-website-redirect-location: /anotherPage.html - // - // In the following example, the request header sets the object redirect to - // another website: - // - // x-amz-website-redirect-location: http://www.example.com/ - // - // For more information about website hosting in Amazon S3, see Hosting Websites - // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s PutObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *PutObjectInput) SetACL(v string) *PutObjectInput { - s.ACL = &v - return s -} - -// SetBody sets the Body field's value. -func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { - s.Body = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { - s.Bucket = &v - return s -} - -func (s *PutObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCacheControl sets the CacheControl field's value. -func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { - s.CacheControl = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { - s.ContentLength = &v - return s -} - -// SetContentMD5 sets the ContentMD5 field's value. -func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { - s.ContentMD5 = &v - return s -} - -// SetContentType sets the ContentType field's value. 
-func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { - s.ContentType = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectInput) SetKey(v string) *PutObjectInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { - s.Metadata = v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *PutObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. 
-func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { - s.Tagging = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { - s.WebsiteRedirectLocation = &v - return s -} - -func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutObjectLegalHoldInput struct { - _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` - - // The bucket name containing the object that you want to place a Legal Hold - // on. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The key name for the object that you want to place a Legal Hold on. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Container element for the Legal Hold configuration you want to apply to the - // specified object. - LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID of the object that you want to place a Legal Hold on. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectLegalHoldInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectLegalHoldInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutObjectLegalHoldInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { - s.Bucket = &v - return s -} - -func (s *PutObjectLegalHoldInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { - s.Key = &v - return s -} - -// SetLegalHold sets the LegalHold field's value. -func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { - s.LegalHold = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { - s.VersionId = &v - return s -} - -func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutObjectLegalHoldOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectLegalHoldOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectLegalHoldOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { - s.RequestCharged = &v - return s -} - -type PutObjectLockConfigurationInput struct { - _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` - - // The bucket whose Object Lock configuration you want to create or replace. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The Object Lock configuration that you want to apply to the specified bucket. - ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // A token to allow Object Lock to be enabled for an existing bucket. - Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` -} - -// String returns the string representation -func (s PutObjectLockConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectLockConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectLockConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutObjectLockConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. -func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { - s.ObjectLockConfiguration = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { - s.RequestPayer = &v - return s -} - -// SetToken sets the Token field's value. -func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { - s.Token = &v - return s -} - -func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutObjectLockConfigurationOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectLockConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectLockConfigurationOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { - s.RequestCharged = &v - return s -} - -type PutObjectOutput struct { - _ struct{} `type:"structure"` - - // Entity tag for the uploaded object. 
- ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the expiration is configured for the object (see PutBucketLifecycleConfiguration), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs that provide information about object expiration. The value - // of the rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the AWS KMS Encryption Context to use for object encryption. - // The value of this header is a base64-encoded UTF-8 string holding JSON with - // the encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // If you specified server-side encryption either with an AWS KMS customer master - // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response - // includes this header. It confirms the encryption algorithm that Amazon S3 - // used to encrypt the object. - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s PutObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectOutput) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { - s.Expiration = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
-func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { - s.VersionId = &v - return s -} - -type PutObjectRetentionInput struct { - _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` - - // The bucket name that contains the object you want to apply this Object Retention - // configuration to. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates whether this operation should bypass Governance-mode restrictions. - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - - // The key name for the object that you want to apply this Object Retention - // configuration to. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The container element for the Object Retention configuration. - Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The version ID for the object that you want to apply this Object Retention - // configuration to. 
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectRetentionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectRetentionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectRetentionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { - s.Bucket = &v - return s -} - -func (s *PutObjectRetentionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { - s.BypassGovernanceRetention = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { - s.RequestPayer = &v - return s -} - -// SetRetention sets the Retention field's value. -func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { - s.Retention = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { - s.VersionId = &v - return s -} - -func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectRetentionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutObjectRetentionOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectRetentionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectRetentionOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { - s.RequestCharged = &v - return s -} - -type PutObjectTaggingInput struct { - _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` - - // The bucket name containing the object. 
- // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Name of the tag. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Container for the TagSet and Tag elements - // - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The versionId of the object that the tag-set will be added to. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { - s.Bucket = &v - return s -} - -func (s *PutObjectTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { - s.Key = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { - s.Tagging = v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { - s.VersionId = &v - return s -} - -func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // The versionId of the object the tag-set was added to. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation -func (s PutObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { - s.VersionId = &v - return s -} - -type PutPublicAccessBlockInput struct { - _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you - // want to set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The PublicAccessBlock configuration that you want to apply to this Amazon - // S3 bucket. You can enable the configuration options in any combination. For - // more information about when Amazon S3 considers a bucket or object public, - // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon Simple Storage Service Developer Guide. - // - // PublicAccessBlockConfiguration is a required field - PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation -func (s PutPublicAccessBlockInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutPublicAccessBlockInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutPublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.PublicAccessBlockConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { - s.Bucket = &v - return s -} - -func (s *PutPublicAccessBlockInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. 
-func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { - s.PublicAccessBlockConfiguration = v - return s -} - -func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type PutPublicAccessBlockOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutPublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutPublicAccessBlockOutput) GoString() string { - return s.String() -} - -// Specifies the configuration for publishing messages to an Amazon Simple Queue -// Service (Amazon SQS) queue when Amazon S3 detects specified events. -type QueueConfiguration struct { - _ struct{} `type:"structure"` - - // A collection of bucket events for which to send notifications - // - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Filter *NotificationConfigurationFilter `type:"structure"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // QueueArn is a required field - QueueArn *string `locationName:"Queue" type:"string" required:"true"` -} - -// String returns the string representation -func (s QueueConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s QueueConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *QueueConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.QueueArn == nil { - invalidParams.Add(request.NewErrParamRequired("QueueArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEvents sets the Events field's value. -func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { - s.Events = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { - s.Id = &v - return s -} - -// SetQueueArn sets the QueueArn field's value. -func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { - s.QueueArn = &v - return s -} - -// This data type is deprecated. Use QueueConfiguration for the same purposes. 
-// This data type specifies the configuration for publishing messages to an -// Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified -// events. -type QueueConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // The bucket event for which to send notifications. - // - // Deprecated: Event has been deprecated - Event *string `deprecated:"true" type:"string" enum:"Event"` - - // A collection of bucket events for which to send notifications - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // publishes a message when it detects events of the specified type. - Queue *string `type:"string"` -} - -// String returns the string representation -func (s QueueConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s QueueConfigurationDeprecated) GoString() string { - return s.String() -} - -// SetEvent sets the Event field's value. -func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { - s.Event = &v - return s -} - -// SetEvents sets the Events field's value. -func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { - s.Events = v - return s -} - -// SetId sets the Id field's value. -func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { - s.Id = &v - return s -} - -// SetQueue sets the Queue field's value. -func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated { - s.Queue = &v - return s -} - -// The container for the records event. -type RecordsEvent struct { - _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` - - // The byte array of partial, one or more result records. - // - // Payload is automatically base64 encoded/decoded by the SDK. - Payload []byte `type:"blob"` -} - -// String returns the string representation -func (s RecordsEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RecordsEvent) GoString() string { - return s.String() -} - -// SetPayload sets the Payload field's value. -func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent { - s.Payload = v - return s -} - -// The RecordsEvent is and event in the SelectObjectContentEventStream group of events. -func (s *RecordsEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value. -// This method is only used internally within the SDK's EventStream handling. -func (s *RecordsEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - s.Payload = make([]byte, len(msg.Payload)) - copy(s.Payload, msg.Payload) - return nil -} - -func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) - msg.Payload = s.Payload - return msg, err -} - -// Specifies how requests are redirected. In the event of an error, you can -// specify a different error code to return. 
-type Redirect struct { - _ struct{} `type:"structure"` - - // The host name to use in the redirect request. - HostName *string `type:"string"` - - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HttpRedirectCode *string `type:"string"` - - // Protocol to use when redirecting requests. The default is the protocol that - // is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` - - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/, you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required - // if one of the siblings is present. Can be present only if ReplaceKeyWith - // is not provided. - ReplaceKeyPrefixWith *string `type:"string"` - - // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the siblings is present. Can - // be present only if ReplaceKeyPrefixWith is not provided. - ReplaceKeyWith *string `type:"string"` -} - -// String returns the string representation -func (s Redirect) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Redirect) GoString() string { - return s.String() -} - -// SetHostName sets the HostName field's value. -func (s *Redirect) SetHostName(v string) *Redirect { - s.HostName = &v - return s -} - -// SetHttpRedirectCode sets the HttpRedirectCode field's value. -func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { - s.HttpRedirectCode = &v - return s -} - -// SetProtocol sets the Protocol field's value. -func (s *Redirect) SetProtocol(v string) *Redirect { - s.Protocol = &v - return s -} - -// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. -func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { - s.ReplaceKeyPrefixWith = &v - return s -} - -// SetReplaceKeyWith sets the ReplaceKeyWith field's value. -func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { - s.ReplaceKeyWith = &v - return s -} - -// Specifies the redirect behavior of all requests to a website endpoint of -// an Amazon S3 bucket. -type RedirectAllRequestsTo struct { - _ struct{} `type:"structure"` - - // Name of the host where requests are redirected. - // - // HostName is a required field - HostName *string `type:"string" required:"true"` - - // Protocol to use when redirecting requests. The default is the protocol that - // is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` -} - -// String returns the string representation -func (s RedirectAllRequestsTo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RedirectAllRequestsTo) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RedirectAllRequestsTo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} - if s.HostName == nil { - invalidParams.Add(request.NewErrParamRequired("HostName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetHostName sets the HostName field's value. 
-func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { - s.HostName = &v - return s -} - -// SetProtocol sets the Protocol field's value. -func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { - s.Protocol = &v - return s -} - -// A container for replication rules. You can add up to 1,000 rules. The maximum -// size of a replication configuration is 2 MB. -type ReplicationConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that Amazon S3 assumes when replicating objects. For more information, - // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Role is a required field - Role *string `type:"string" required:"true"` - - // A container for one or more replication rules. A replication configuration - // must have at least one rule and can contain a maximum of 1,000 rules. - // - // Rules is a required field - Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s ReplicationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRole sets the Role field's value. -func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { - s.Role = &v - return s -} - -// SetRules sets the Rules field's value. -func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { - s.Rules = v - return s -} - -// Specifies which Amazon S3 objects to replicate and where to store the replicas. -type ReplicationRule struct { - _ struct{} `type:"structure"` - - // Specifies whether Amazon S3 replicates the delete markers. If you specify - // a Filter, you must specify this element. However, in the latest version of - // replication configuration (when Filter is specified), Amazon S3 doesn't replicate - // delete markers. Therefore, the DeleteMarkerReplication element can contain - // only Disabled. For an example configuration, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). - // - // If you don't specify the Filter element, Amazon S3 assumes that the replication - // configuration is the earlier version, V1. In the earlier version, Amazon - // S3 handled replication of delete markers differently. For more information, - // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). 
- DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` - - // A container for information about the replication destination and its configurations - // including enabling the S3 Replication Time Control (S3 RTC). - // - // Destination is a required field - Destination *Destination `type:"structure" required:"true"` - - // Optional configuration to replicate existing source bucket objects. For more - // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) - // in the Amazon S3 Developer Guide. - ExistingObjectReplication *ExistingObjectReplication `type:"structure"` - - // A filter that identifies the subset of objects to which the replication rule - // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. - Filter *ReplicationRuleFilter `type:"structure"` - - // A unique identifier for the rule. The maximum value is 255 characters. - ID *string `type:"string"` - - // An object key name prefix that identifies the object or objects to which - // the rule applies. The maximum prefix length is 1,024 characters. To include - // all objects in a bucket, specify an empty string. - // - // Deprecated: Prefix has been deprecated - Prefix *string `deprecated:"true" type:"string"` - - // The priority associated with the rule. If you specify multiple rules in a - // replication configuration, Amazon S3 prioritizes the rules to prevent conflicts - // when filtering. If two or more rules identify the same object based on a - // specified filter, the rule with higher priority takes precedence. For example: - // - // * Same object quality prefix-based filter criteria if prefixes you specified - // in multiple rules overlap - // - // * Same object qualify tag-based filter criteria specified in multiple - // rules - // - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon Simple Storage Service Developer Guide. - Priority *int64 `type:"integer"` - - // A container that describes additional filters for identifying the source - // objects that you want to replicate. You can choose to enable or disable the - // replication of these objects. Currently, Amazon S3 supports only the filter - // that you can specify for objects created with server-side encryption using - // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). - SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - - // Specifies whether the rule is enabled. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` -} - -// String returns the string representation -func (s ReplicationRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ReplicationRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } - } - if s.ExistingObjectReplication != nil { - if err := s.ExistingObjectReplication.Validate(); err != nil { - invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) - } - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - if s.SourceSelectionCriteria != nil { - if err := s.SourceSelectionCriteria.Validate(); err != nil { - invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. -func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { - s.DeleteMarkerReplication = v - return s -} - -// SetDestination sets the Destination field's value. -func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { - s.Destination = v - return s -} - -// SetExistingObjectReplication sets the ExistingObjectReplication field's value. -func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { - s.ExistingObjectReplication = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { - s.Filter = v - return s -} - -// SetID sets the ID field's value. -func (s *ReplicationRule) SetID(v string) *ReplicationRule { - s.ID = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { - s.Prefix = &v - return s -} - -// SetPriority sets the Priority field's value. -func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { - s.Priority = &v - return s -} - -// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. -func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { - s.SourceSelectionCriteria = v - return s -} - -// SetStatus sets the Status field's value. -func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { - s.Status = &v - return s -} - -// A container for specifying rule filters. The filters determine the subset -// of objects to which the rule applies. This element is required only if you -// specify more than one filter. -// -// For example: -// -// * If you specify both a Prefix and a Tag filter, wrap these filters in -// an And tag. -// -// * If you specify a filter based on multiple tags, wrap the Tag elements -// in an And tag -type ReplicationRuleAndOperator struct { - _ struct{} `type:"structure"` - - // An object key name prefix that identifies the subset of objects to which - // the rule applies. - Prefix *string `type:"string"` - - // An array of tags containing key and value pairs. 
- Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ReplicationRuleAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationRuleAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationRuleAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { - s.Tags = v - return s -} - -// A filter that identifies the subset of objects to which the replication rule -// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. -type ReplicationRuleFilter struct { - _ struct{} `type:"structure"` - - // A container for specifying rule filters. The filters determine the subset - // of objects to which the rule applies. This element is required only if you - // specify more than one filter. For example: - // - // * If you specify both a Prefix and a Tag filter, wrap these filters in - // an And tag. - // - // * If you specify a filter based on multiple tags, wrap the Tag elements - // in an And tag. - And *ReplicationRuleAndOperator `type:"structure"` - - // An object key name prefix that identifies the subset of objects to which - // the rule applies. - Prefix *string `type:"string"` - - // A container for specifying a tag key and value. - // - // The rule applies only to objects that have the tag in their tag set. - Tag *Tag `type:"structure"` -} - -// String returns the string representation -func (s ReplicationRuleFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationRuleFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationRuleFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. 
-func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { - s.Tag = v - return s -} - -// A container specifying S3 Replication Time Control (S3 RTC) related information, -// including whether S3 RTC is enabled and the time when all objects and operations -// on objects must be replicated. Must be specified together with a Metrics -// block. -type ReplicationTime struct { - _ struct{} `type:"structure"` - - // Specifies whether the replication time is enabled. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` - - // A container specifying the time by which replication should be complete for - // all objects and operations on objects. - // - // Time is a required field - Time *ReplicationTimeValue `type:"structure" required:"true"` -} - -// String returns the string representation -func (s ReplicationTime) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationTime) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationTime) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { - s.Status = &v - return s -} - -// SetTime sets the Time field's value. -func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { - s.Time = v - return s -} - -// A container specifying the time value for S3 Replication Time Control (S3 -// RTC) and replication metrics EventThreshold. -type ReplicationTimeValue struct { - _ struct{} `type:"structure"` - - // Contains an integer specifying time in minutes. - // - // Valid values: 15 minutes. - Minutes *int64 `type:"integer"` -} - -// String returns the string representation -func (s ReplicationTimeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationTimeValue) GoString() string { - return s.String() -} - -// SetMinutes sets the Minutes field's value. -func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { - s.Minutes = &v - return s -} - -// Container for Payer. -type RequestPaymentConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies who pays for the download and request fees. - // - // Payer is a required field - Payer *string `type:"string" required:"true" enum:"Payer"` -} - -// String returns the string representation -func (s RequestPaymentConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestPaymentConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RequestPaymentConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} - if s.Payer == nil { - invalidParams.Add(request.NewErrParamRequired("Payer")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPayer sets the Payer field's value. 
-func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { - s.Payer = &v - return s -} - -// Container for specifying if periodic QueryProgress messages should be sent. -type RequestProgress struct { - _ struct{} `type:"structure"` - - // Specifies whether periodic QueryProgress frames should be sent. Valid values: - // TRUE, FALSE. Default value: FALSE. - Enabled *bool `type:"boolean"` -} - -// String returns the string representation -func (s RequestProgress) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestProgress) GoString() string { - return s.String() -} - -// SetEnabled sets the Enabled field's value. -func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { - s.Enabled = &v - return s -} - -type RestoreObjectInput struct { - _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` - - // The bucket name or containing the object to restore. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Object key for which the operation was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Container for restore job parameters. - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s RestoreObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RestoreObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *RestoreObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.RestoreRequest != nil { - if err := s.RestoreRequest.Validate(); err != nil { - invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { - s.Bucket = &v - return s -} - -func (s *RestoreObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { - s.RequestPayer = &v - return s -} - -// SetRestoreRequest sets the RestoreRequest field's value. -func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { - s.RestoreRequest = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { - s.VersionId = &v - return s -} - -func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *RestoreObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type RestoreObjectOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Indicates the path in the provided S3 output location where Select results - // will be restored to. - RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` -} - -// String returns the string representation -func (s RestoreObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RestoreObjectOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestoreOutputPath sets the RestoreOutputPath field's value. -func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { - s.RestoreOutputPath = &v - return s -} - -// Container for restore job parameters. -type RestoreRequest struct { - _ struct{} `type:"structure"` - - // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation. - Days *int64 `type:"integer"` - - // The optional description for the job. - Description *string `type:"string"` - - // Glacier related parameters pertaining to this job. Do not use with restores - // that specify OutputLocation. 
- GlacierJobParameters *GlacierJobParameters `type:"structure"` - - // Describes the location where the restore job's output is stored. - OutputLocation *OutputLocation `type:"structure"` - - // Describes the parameters for Select job types. - SelectParameters *SelectParameters `type:"structure"` - - // Glacier retrieval tier at which the restore will be processed. - Tier *string `type:"string" enum:"Tier"` - - // Type of restore request. - Type *string `type:"string" enum:"RestoreRequestType"` -} - -// String returns the string representation -func (s RestoreRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RestoreRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} - if s.GlacierJobParameters != nil { - if err := s.GlacierJobParameters.Validate(); err != nil { - invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) - } - } - if s.OutputLocation != nil { - if err := s.OutputLocation.Validate(); err != nil { - invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) - } - } - if s.SelectParameters != nil { - if err := s.SelectParameters.Validate(); err != nil { - invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDays sets the Days field's value. -func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { - s.Days = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { - s.Description = &v - return s -} - -// SetGlacierJobParameters sets the GlacierJobParameters field's value. -func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { - s.GlacierJobParameters = v - return s -} - -// SetOutputLocation sets the OutputLocation field's value. -func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { - s.OutputLocation = v - return s -} - -// SetSelectParameters sets the SelectParameters field's value. -func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { - s.SelectParameters = v - return s -} - -// SetTier sets the Tier field's value. -func (s *RestoreRequest) SetTier(v string) *RestoreRequest { - s.Tier = &v - return s -} - -// SetType sets the Type field's value. -func (s *RestoreRequest) SetType(v string) *RestoreRequest { - s.Type = &v - return s -} - -// Specifies the redirect behavior and when a redirect is applied. -type RoutingRule struct { - _ struct{} `type:"structure"` - - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition `type:"structure"` - - // Container for redirect information. You can redirect requests to another - // host, to another page, or with another protocol. In the event of an error, - // you can specify a different error code to return. 
- // - // Redirect is a required field - Redirect *Redirect `type:"structure" required:"true"` -} - -// String returns the string representation -func (s RoutingRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RoutingRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RoutingRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} - if s.Redirect == nil { - invalidParams.Add(request.NewErrParamRequired("Redirect")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCondition sets the Condition field's value. -func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { - s.Condition = v - return s -} - -// SetRedirect sets the Redirect field's value. -func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { - s.Redirect = v - return s -} - -// Specifies lifecycle rules for an Amazon S3 bucket. For more information, -// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) -// in the Amazon Simple Storage Service API Reference. -type Rule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an incomplete multipart upload - // that Amazon S3 will wait before permanently removing all parts of the upload. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon Simple Storage Service Developer Guide. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - // Specifies the expiration for the lifecycle of the object. - Expiration *LifecycleExpiration `type:"structure"` - - // Unique identifier for the rule. The value can't be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, - // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning - // is suspended), you can set this action to request that Amazon S3 transition - // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, - // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's - // lifetime. - NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - - // Object key prefix that identifies one or more objects to which this rule - // applies. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` - - // If Enabled, the rule is currently being applied. If Disabled, the rule is - // not currently being applied. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - // Specifies when an object transitions to a specified storage class. 
- Transition *Transition `type:"structure"` -} - -// String returns the string representation -func (s Rule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Rule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Rule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Rule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. -func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { - s.AbortIncompleteMultipartUpload = v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { - s.Expiration = v - return s -} - -// SetID sets the ID field's value. -func (s *Rule) SetID(v string) *Rule { - s.ID = &v - return s -} - -// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. -func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { - s.NoncurrentVersionExpiration = v - return s -} - -// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. -func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { - s.NoncurrentVersionTransition = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *Rule) SetPrefix(v string) *Rule { - s.Prefix = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *Rule) SetStatus(v string) *Rule { - s.Status = &v - return s -} - -// SetTransition sets the Transition field's value. -func (s *Rule) SetTransition(v *Transition) *Rule { - s.Transition = v - return s -} - -// Specifies the use of SSE-KMS to encrypt delivered inventory reports. -type SSEKMS struct { - _ struct{} `locationName:"SSE-KMS" type:"structure"` - - // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer - // managed customer master key (CMK) to use for encrypting inventory reports. - // - // KeyId is a required field - KeyId *string `type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation -func (s SSEKMS) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SSEKMS) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SSEKMS) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *SSEKMS) SetKeyId(v string) *SSEKMS { - s.KeyId = &v - return s -} - -// Specifies the use of SSE-S3 to encrypt delivered inventory reports. 
-type SSES3 struct { - _ struct{} `locationName:"SSE-S3" type:"structure"` -} - -// String returns the string representation -func (s SSES3) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SSES3) GoString() string { - return s.String() -} - -// Specifies the byte range of the object to get the records from. A record -// is processed when its first byte is contained by the range. This parameter -// is optional, but when specified, it must not be empty. See RFC 2616, Section -// 14.35.1 about how to specify the start and end of the range. -type ScanRange struct { - _ struct{} `type:"structure"` - - // Specifies the end of the byte range. This parameter is optional. Valid values: - // non-negative integers. The default value is one less than the size of the - // object being queried. If only the End parameter is supplied, it is interpreted - // to mean scan the last N bytes of the file. For example, 50 - // means scan the last 50 bytes. - End *int64 `type:"long"` - - // Specifies the start of the byte range. This parameter is optional. Valid - // values: non-negative integers. The default value is 0. If only start is supplied, - // it means scan from that point to the end of the file.For example; 50 - // means scan from byte 50 until the end of the file. - Start *int64 `type:"long"` -} - -// String returns the string representation -func (s ScanRange) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScanRange) GoString() string { - return s.String() -} - -// SetEnd sets the End field's value. -func (s *ScanRange) SetEnd(v int64) *ScanRange { - s.End = &v - return s -} - -// SetStart sets the Start field's value. -func (s *ScanRange) SetStart(v int64) *ScanRange { - s.Start = &v - return s -} - -// SelectObjectContentEventStreamEvent groups together all EventStream -// events writes for SelectObjectContentEventStream. -// -// These events are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -type SelectObjectContentEventStreamEvent interface { - eventSelectObjectContentEventStream() - eventstreamapi.Marshaler - eventstreamapi.Unmarshaler -} - -// SelectObjectContentEventStreamReader provides the interface for reading to the stream. The -// default implementation for this interface will be SelectObjectContentEventStreamData. -// -// The reader's Close method must allow multiple concurrent calls. -// -// These events are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -type SelectObjectContentEventStreamReader interface { - // Returns a channel of events as they are read from the event stream. - Events() <-chan SelectObjectContentEventStreamEvent - - // Close will stop the reader reading events from the stream. - Close() error - - // Returns any error that has occurred while reading from the event stream. 
- Err() error -} - -type readSelectObjectContentEventStream struct { - eventReader *eventstreamapi.EventReader - stream chan SelectObjectContentEventStreamEvent - err *eventstreamapi.OnceError - - done chan struct{} - closeOnce sync.Once -} - -func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream { - r := &readSelectObjectContentEventStream{ - eventReader: eventReader, - stream: make(chan SelectObjectContentEventStreamEvent), - done: make(chan struct{}), - err: eventstreamapi.NewOnceError(), - } - go r.readEventStream() - - return r -} - -// Close will close the underlying event stream reader. -func (r *readSelectObjectContentEventStream) Close() error { - r.closeOnce.Do(r.safeClose) - return r.Err() -} - -func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} { - return r.err.ErrorSet() -} - -func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} { - return r.done -} - -func (r *readSelectObjectContentEventStream) safeClose() { - close(r.done) -} - -func (r *readSelectObjectContentEventStream) Err() error { - return r.err.Err() -} - -func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { - return r.stream -} - -func (r *readSelectObjectContentEventStream) readEventStream() { - defer r.Close() - defer close(r.stream) - - for { - event, err := r.eventReader.ReadEvent() - if err != nil { - if err == io.EOF { - return - } - select { - case <-r.done: - // If closed already ignore the error - return - default: - } - r.err.SetError(err) - return - } - - select { - case r.stream <- event.(SelectObjectContentEventStreamEvent): - case <-r.done: - return - } - } -} - -type unmarshalerForSelectObjectContentEventStreamEvent struct { - metadata protocol.ResponseMetadata -} - -func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { - switch eventType { - case "Cont": - return &ContinuationEvent{}, nil - case "End": - return &EndEvent{}, nil - case "Progress": - return &ProgressEvent{}, nil - case "Records": - return &RecordsEvent{}, nil - case "Stats": - return &StatsEvent{}, nil - default: - return nil, awserr.New( - request.ErrCodeSerialization, - fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), - nil, - ) - } -} - -// Request to filter the contents of an Amazon S3 object based on a simple Structured -// Query Language (SQL) statement. In the request, along with the SQL expression, -// you must specify a data serialization format (JSON or CSV) of the object. -// Amazon S3 uses this to parse object data into records. It returns only records -// that match the specified SQL expression. You must also specify the data serialization -// format for the response. For more information, see S3Select API Documentation -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). -type SelectObjectContentInput struct { - _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The S3 bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The expression that is used to query the object. - // - // Expression is a required field - Expression *string `type:"string" required:"true"` - - // The type of the provided expression (for example, SQL). 
- // - // ExpressionType is a required field - ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` - - // Describes the format of the data in the object that is being queried. - // - // InputSerialization is a required field - InputSerialization *InputSerialization `type:"structure" required:"true"` - - // The object key. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Describes the format of the data that you want Amazon S3 to return in response. - // - // OutputSerialization is a required field - OutputSerialization *OutputSerialization `type:"structure" required:"true"` - - // Specifies if periodic request progress information should be enabled. - RequestProgress *RequestProgress `type:"structure"` - - // The SSE Algorithm used to encrypt the object. For more information, see Server-Side - // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // The SSE Customer Key. For more information, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // The SSE Customer Key MD5. For more information, see Server-Side Encryption - // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the byte range of the object to get the records from. A record - // is processed when its first byte is contained by the range. This parameter - // is optional, but when specified, it must not be empty. See RFC 2616, Section - // 14.35.1 about how to specify the start and end of the range. - // - // ScanRangemay be used in the following ways: - // - // * 50100 - process only - // the records starting between the bytes 50 and 100 (inclusive, counting - // from zero) - // - // * 50 - process only the records - // starting after the byte 50 - // - // * 50 - process only the records within - // the last 50 bytes of the file. - ScanRange *ScanRange `type:"structure"` -} - -// String returns the string representation -func (s SelectObjectContentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SelectObjectContentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *SelectObjectContentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Expression == nil { - invalidParams.Add(request.NewErrParamRequired("Expression")) - } - if s.ExpressionType == nil { - invalidParams.Add(request.NewErrParamRequired("ExpressionType")) - } - if s.InputSerialization == nil { - invalidParams.Add(request.NewErrParamRequired("InputSerialization")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.OutputSerialization == nil { - invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { - s.Bucket = &v - return s -} - -func (s *SelectObjectContentInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpression sets the Expression field's value. -func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { - s.Expression = &v - return s -} - -// SetExpressionType sets the ExpressionType field's value. -func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { - s.ExpressionType = &v - return s -} - -// SetInputSerialization sets the InputSerialization field's value. -func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { - s.InputSerialization = v - return s -} - -// SetKey sets the Key field's value. -func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { - s.Key = &v - return s -} - -// SetOutputSerialization sets the OutputSerialization field's value. -func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { - s.OutputSerialization = v - return s -} - -// SetRequestProgress sets the RequestProgress field's value. -func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { - s.RequestProgress = v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { - s.SSECustomerKey = &v - return s -} - -func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetScanRange sets the ScanRange field's value. 
-func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { - s.ScanRange = v - return s -} - -func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *SelectObjectContentInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type SelectObjectContentOutput struct { - _ struct{} `type:"structure" payload:"Payload"` - - EventStream *SelectObjectContentEventStream -} - -// String returns the string representation -func (s SelectObjectContentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SelectObjectContentOutput) GoString() string { - return s.String() -} - -func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { - s.EventStream = v - return s -} -func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { - return s.EventStream -} - -// GetStream returns the type to interact with the event stream. -func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { - return s.EventStream -} - -// Describes the parameters for Select job types. -type SelectParameters struct { - _ struct{} `type:"structure"` - - // The expression that is used to query the object. - // - // Expression is a required field - Expression *string `type:"string" required:"true"` - - // The type of the provided expression (for example, SQL). - // - // ExpressionType is a required field - ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` - - // Describes the serialization format of the object. - // - // InputSerialization is a required field - InputSerialization *InputSerialization `type:"structure" required:"true"` - - // Describes how the results of the Select job are serialized. - // - // OutputSerialization is a required field - OutputSerialization *OutputSerialization `type:"structure" required:"true"` -} - -// String returns the string representation -func (s SelectParameters) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SelectParameters) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SelectParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} - if s.Expression == nil { - invalidParams.Add(request.NewErrParamRequired("Expression")) - } - if s.ExpressionType == nil { - invalidParams.Add(request.NewErrParamRequired("ExpressionType")) - } - if s.InputSerialization == nil { - invalidParams.Add(request.NewErrParamRequired("InputSerialization")) - } - if s.OutputSerialization == nil { - invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetExpression sets the Expression field's value. -func (s *SelectParameters) SetExpression(v string) *SelectParameters { - s.Expression = &v - return s -} - -// SetExpressionType sets the ExpressionType field's value. -func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { - s.ExpressionType = &v - return s -} - -// SetInputSerialization sets the InputSerialization field's value. 
-func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { - s.InputSerialization = v - return s -} - -// SetOutputSerialization sets the OutputSerialization field's value. -func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { - s.OutputSerialization = v - return s -} - -// Describes the default server-side encryption to apply to new objects in the -// bucket. If a PUT Object request doesn't specify any server-side encryption, -// this default encryption will be applied. For more information, see PUT Bucket -// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon Simple Storage Service API Reference. -type ServerSideEncryptionByDefault struct { - _ struct{} `type:"structure"` - - // KMS master key ID to use for the default encryption. This parameter is allowed - // if and only if SSEAlgorithm is set to aws:kms. - KMSMasterKeyID *string `type:"string" sensitive:"true"` - - // Server-side encryption algorithm to use for the default encryption. - // - // SSEAlgorithm is a required field - SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` -} - -// String returns the string representation -func (s ServerSideEncryptionByDefault) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ServerSideEncryptionByDefault) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ServerSideEncryptionByDefault) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} - if s.SSEAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. -func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { - s.KMSMasterKeyID = &v - return s -} - -// SetSSEAlgorithm sets the SSEAlgorithm field's value. -func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { - s.SSEAlgorithm = &v - return s -} - -// Specifies the default server-side-encryption configuration. -type ServerSideEncryptionConfiguration struct { - _ struct{} `type:"structure"` - - // Container for information about a particular server-side encryption configuration - // rule. - // - // Rules is a required field - Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s ServerSideEncryptionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ServerSideEncryptionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ServerSideEncryptionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { - s.Rules = v - return s -} - -// Specifies the default server-side encryption configuration. -type ServerSideEncryptionRule struct { - _ struct{} `type:"structure"` - - // Specifies the default server-side encryption to apply to new objects in the - // bucket. If a PUT Object request doesn't specify any server-side encryption, - // this default encryption will be applied. - ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` -} - -// String returns the string representation -func (s ServerSideEncryptionRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ServerSideEncryptionRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ServerSideEncryptionRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} - if s.ApplyServerSideEncryptionByDefault != nil { - if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { - invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. -func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { - s.ApplyServerSideEncryptionByDefault = v - return s -} - -// A container that describes additional filters for identifying the source -// objects that you want to replicate. You can choose to enable or disable the -// replication of these objects. Currently, Amazon S3 supports only the filter -// that you can specify for objects created with server-side encryption using -// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). -type SourceSelectionCriteria struct { - _ struct{} `type:"structure"` - - // A container for filter information for the selection of Amazon S3 objects - // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication - // configuration, this element is required. - SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` -} - -// String returns the string representation -func (s SourceSelectionCriteria) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SourceSelectionCriteria) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *SourceSelectionCriteria) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} - if s.SseKmsEncryptedObjects != nil { - if err := s.SseKmsEncryptedObjects.Validate(); err != nil { - invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. -func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { - s.SseKmsEncryptedObjects = v - return s -} - -// A container for filter information for the selection of S3 objects encrypted -// with AWS KMS. -type SseKmsEncryptedObjects struct { - _ struct{} `type:"structure"` - - // Specifies whether Amazon S3 replicates objects created with server-side encryption - // using a customer master key (CMK) stored in AWS Key Management Service. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` -} - -// String returns the string representation -func (s SseKmsEncryptedObjects) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SseKmsEncryptedObjects) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SseKmsEncryptedObjects) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { - s.Status = &v - return s -} - -// Container for the stats details. -type Stats struct { - _ struct{} `type:"structure"` - - // The total number of uncompressed object bytes processed. - BytesProcessed *int64 `type:"long"` - - // The total number of bytes of records payload data returned. - BytesReturned *int64 `type:"long"` - - // The total number of object bytes scanned. - BytesScanned *int64 `type:"long"` -} - -// String returns the string representation -func (s Stats) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Stats) GoString() string { - return s.String() -} - -// SetBytesProcessed sets the BytesProcessed field's value. -func (s *Stats) SetBytesProcessed(v int64) *Stats { - s.BytesProcessed = &v - return s -} - -// SetBytesReturned sets the BytesReturned field's value. -func (s *Stats) SetBytesReturned(v int64) *Stats { - s.BytesReturned = &v - return s -} - -// SetBytesScanned sets the BytesScanned field's value. -func (s *Stats) SetBytesScanned(v int64) *Stats { - s.BytesScanned = &v - return s -} - -// Container for the Stats Event. -type StatsEvent struct { - _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` - - // The Stats event details. - Details *Stats `locationName:"Details" type:"structure"` -} - -// String returns the string representation -func (s StatsEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StatsEvent) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. 
-func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent { - s.Details = v - return s -} - -// The StatsEvent is and event in the SelectObjectContentEventStream group of events. -func (s *StatsEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value. -// This method is only used internally within the SDK's EventStream handling. -func (s *StatsEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - if err := payloadUnmarshaler.UnmarshalPayload( - bytes.NewReader(msg.Payload), s, - ); err != nil { - return err - } - return nil -} - -func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - var buf bytes.Buffer - if err = pm.MarshalPayload(&buf, s); err != nil { - return eventstream.Message{}, err - } - msg.Payload = buf.Bytes() - return msg, err -} - -// Specifies data related to access patterns to be collected and made available -// to analyze the tradeoffs between different storage classes for an Amazon -// S3 bucket. -type StorageClassAnalysis struct { - _ struct{} `type:"structure"` - - // Specifies how data related to the storage class analysis for an Amazon S3 - // bucket should be exported. - DataExport *StorageClassAnalysisDataExport `type:"structure"` -} - -// String returns the string representation -func (s StorageClassAnalysis) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StorageClassAnalysis) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StorageClassAnalysis) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} - if s.DataExport != nil { - if err := s.DataExport.Validate(); err != nil { - invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDataExport sets the DataExport field's value. -func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { - s.DataExport = v - return s -} - -// Container for data related to the storage class analysis for an Amazon S3 -// bucket for export. -type StorageClassAnalysisDataExport struct { - _ struct{} `type:"structure"` - - // The place to store the data for an analysis. - // - // Destination is a required field - Destination *AnalyticsExportDestination `type:"structure" required:"true"` - - // The version of the output schema to use when exporting data. Must be V_1. - // - // OutputSchemaVersion is a required field - OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` -} - -// String returns the string representation -func (s StorageClassAnalysisDataExport) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StorageClassAnalysisDataExport) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StorageClassAnalysisDataExport) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.OutputSchemaVersion == nil { - invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDestination sets the Destination field's value. -func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { - s.Destination = v - return s -} - -// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. -func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { - s.OutputSchemaVersion = &v - return s -} - -// A container of a key value name pair. -type Tag struct { - _ struct{} `type:"structure"` - - // Name of the tag. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // Value of the tag. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -// Container for TagSet elements. -type Tagging struct { - _ struct{} `type:"structure"` - - // A collection for a set of tags - // - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` -} - -// String returns the string representation -func (s Tagging) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tagging) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tagging) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tagging"} - if s.TagSet == nil { - invalidParams.Add(request.NewErrParamRequired("TagSet")) - } - if s.TagSet != nil { - for i, v := range s.TagSet { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTagSet sets the TagSet field's value. -func (s *Tagging) SetTagSet(v []*Tag) *Tagging { - s.TagSet = v - return s -} - -// Container for granting information. 
-type TargetGrant struct { - _ struct{} `type:"structure"` - - // Container for the person being granted permissions. - Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Logging permissions assigned to the Grantee for the bucket. - Permission *string `type:"string" enum:"BucketLogsPermission"` -} - -// String returns the string representation -func (s TargetGrant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TargetGrant) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TargetGrant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} - if s.Grantee != nil { - if err := s.Grantee.Validate(); err != nil { - invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantee sets the Grantee field's value. -func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { - s.Grantee = v - return s -} - -// SetPermission sets the Permission field's value. -func (s *TargetGrant) SetPermission(v string) *TargetGrant { - s.Permission = &v - return s -} - -// A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 -// detects specified events. -type TopicConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon S3 bucket event about which to send notifications. For more information, - // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Filter *NotificationConfigurationFilter `type:"structure"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // TopicArn is a required field - TopicArn *string `locationName:"Topic" type:"string" required:"true"` -} - -// String returns the string representation -func (s TopicConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TopicConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TopicConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEvents sets the Events field's value. 
-func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { - s.Events = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { - s.Id = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { - s.TopicArn = &v - return s -} - -// A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 -// detects specified events. This data type is deprecated. Use TopicConfiguration -// instead. -type TopicConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // Bucket event for which to send notifications. - // - // Deprecated: Event has been deprecated - Event *string `deprecated:"true" type:"string" enum:"Event"` - - // A collection of events related to objects - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // Amazon SNS topic to which Amazon S3 will publish a message to report the - // specified events for the bucket. - Topic *string `type:"string"` -} - -// String returns the string representation -func (s TopicConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TopicConfigurationDeprecated) GoString() string { - return s.String() -} - -// SetEvent sets the Event field's value. -func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { - s.Event = &v - return s -} - -// SetEvents sets the Events field's value. -func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { - s.Events = v - return s -} - -// SetId sets the Id field's value. -func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { - s.Id = &v - return s -} - -// SetTopic sets the Topic field's value. -func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { - s.Topic = &v - return s -} - -// Specifies when an object transitions to a specified storage class. -type Transition struct { - _ struct{} `type:"structure"` - - // Indicates when objects are transitioned to the specified storage class. The - // date value must be in ISO 8601 format. The time is always midnight UTC. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the number of days after creation when objects are transitioned - // to the specified storage class. The value must be a positive integer. - Days *int64 `type:"integer"` - - // The storage class to which you want the object to transition. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` -} - -// String returns the string representation -func (s Transition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Transition) GoString() string { - return s.String() -} - -// SetDate sets the Date field's value. -func (s *Transition) SetDate(v time.Time) *Transition { - s.Date = &v - return s -} - -// SetDays sets the Days field's value. 
-func (s *Transition) SetDays(v int64) *Transition { - s.Days = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Transition) SetStorageClass(v string) *Transition { - s.StorageClass = &v - return s -} - -type UploadPartCopyInput struct { - _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. - // - // CopySource is a required field - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` - - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` - - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. - CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` - - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` - - // The range of bytes to copy from the source object. The range value must use - // the form bytes=first-last, where the first and last are the zero-based byte - // offsets to copy. For example, bytes=0-9 indicates that you want to copy the - // first 10 bytes of the source. You can copy a range only if the source object - // is greater than 5 MB. - CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` - - // Specifies the algorithm to use when decrypting the source object (for example, - // AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. - CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of part being copied. This is a positive integer between 1 and - // 10,000. - // - // PartNumber is a required field - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. 
- // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being copied. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s UploadPartCopyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartCopyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UploadPartCopyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.CopySource == nil { - invalidParams.Add(request.NewErrParamRequired("CopySource")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.PartNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PartNumber")) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { - s.Bucket = &v - return s -} - -func (s *UploadPartCopyInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCopySource sets the CopySource field's value. -func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { - s.CopySource = &v - return s -} - -// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. 
-func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { - s.CopySourceIfMatch = &v - return s -} - -// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. -func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { - s.CopySourceIfModifiedSince = &v - return s -} - -// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. -func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { - s.CopySourceIfNoneMatch = &v - return s -} - -// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. -func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { - s.CopySourceIfUnmodifiedSince = &v - return s -} - -// SetCopySourceRange sets the CopySourceRange field's value. -func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { - s.CopySourceRange = &v - return s -} - -// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerAlgorithm = &v - return s -} - -// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerKey = &v - return s -} - -func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { - if s.CopySourceSSECustomerKey == nil { - return v - } - return *s.CopySourceSSECustomerKey -} - -// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerKeyMD5 = &v - return s -} - -// SetKey sets the Key field's value. -func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { - s.PartNumber = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { - s.SSECustomerKey = &v - return s -} - -func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. 
-func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { - s.UploadId = &v - return s -} - -func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *UploadPartCopyInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type UploadPartCopyOutput struct { - _ struct{} `type:"structure" payload:"CopyPartResult"` - - // Container for all response elements. - CopyPartResult *CopyPartResult `type:"structure"` - - // The version of the source object that was copied, if you have enabled versioning - // on the source bucket. - CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) that was used for the - // object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -} - -// String returns the string representation -func (s UploadPartCopyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartCopyOutput) GoString() string { - return s.String() -} - -// SetCopyPartResult sets the CopyPartResult field's value. -func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { - s.CopyPartResult = v - return s -} - -// SetCopySourceVersionId sets the CopySourceVersionId field's value. -func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { - s.CopySourceVersionId = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
-func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { - s.ServerSideEncryption = &v - return s -} - -type UploadPartInput struct { - _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - // Name of the bucket to which the multipart upload was initiated. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. - ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of part being uploaded. This is a positive integer between 1 - // and 10,000. - // - // PartNumber is a required field - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. 
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being uploaded. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s UploadPartInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UploadPartInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.PartNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PartNumber")) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBody sets the Body field's value. -func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { - s.Body = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { - s.Bucket = &v - return s -} - -func (s *UploadPartInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContentLength sets the ContentLength field's value. -func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { - s.ContentLength = &v - return s -} - -// SetContentMD5 sets the ContentMD5 field's value. -func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { - s.ContentMD5 = &v - return s -} - -// SetKey sets the Key field's value. -func (s *UploadPartInput) SetKey(v string) *UploadPartInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { - s.PartNumber = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { - s.SSECustomerKey = &v - return s -} - -func (s *UploadPartInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. 
-func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { - s.UploadId = &v - return s -} - -func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *UploadPartInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -type UploadPartOutput struct { - _ struct{} `type:"structure"` - - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetric customer managed customer master key (CMK) was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -} - -// String returns the string representation -func (s UploadPartOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartOutput) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { - s.ETag = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { - s.ServerSideEncryption = &v - return s -} - -// Describes the versioning state of an Amazon S3 bucket. 
For more information, -// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon Simple Storage Service API Reference. -type VersioningConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation -func (s VersioningConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VersioningConfiguration) GoString() string { - return s.String() -} - -// SetMFADelete sets the MFADelete field's value. -func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { - s.MFADelete = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { - s.Status = &v - return s -} - -// Specifies website configuration parameters for an Amazon S3 bucket. -type WebsiteConfiguration struct { - _ struct{} `type:"structure"` - - // The name of the error document for the website. - ErrorDocument *ErrorDocument `type:"structure"` - - // The name of the index document for the website. - IndexDocument *IndexDocument `type:"structure"` - - // The redirect behavior for every request to this bucket's website endpoint. - // - // If you specify this property, you can't specify any other property. - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation -func (s WebsiteConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WebsiteConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WebsiteConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} - if s.ErrorDocument != nil { - if err := s.ErrorDocument.Validate(); err != nil { - invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) - } - } - if s.IndexDocument != nil { - if err := s.IndexDocument.Validate(); err != nil { - invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) - } - } - if s.RedirectAllRequestsTo != nil { - if err := s.RedirectAllRequestsTo.Validate(); err != nil { - invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) - } - } - if s.RoutingRules != nil { - for i, v := range s.RoutingRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetErrorDocument sets the ErrorDocument field's value. 
-func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { - s.ErrorDocument = v - return s -} - -// SetIndexDocument sets the IndexDocument field's value. -func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { - s.IndexDocument = v - return s -} - -// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. -func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { - s.RedirectAllRequestsTo = v - return s -} - -// SetRoutingRules sets the RoutingRules field's value. -func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { - s.RoutingRules = v - return s -} - -const ( - // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value - AnalyticsS3ExportFileFormatCsv = "CSV" -) - -const ( - // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value - BucketAccelerateStatusEnabled = "Enabled" - - // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value - BucketAccelerateStatusSuspended = "Suspended" -) - -const ( - // BucketCannedACLPrivate is a BucketCannedACL enum value - BucketCannedACLPrivate = "private" - - // BucketCannedACLPublicRead is a BucketCannedACL enum value - BucketCannedACLPublicRead = "public-read" - - // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value - BucketCannedACLPublicReadWrite = "public-read-write" - - // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value - BucketCannedACLAuthenticatedRead = "authenticated-read" -) - -const ( - // BucketLocationConstraintEu is a BucketLocationConstraint enum value - BucketLocationConstraintEu = "EU" - - // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuWest1 = "eu-west-1" - - // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest1 = "us-west-1" - - // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest2 = "us-west-2" - - // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value - BucketLocationConstraintApSouth1 = "ap-south-1" - - // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApSoutheast1 = "ap-southeast-1" - - // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value - BucketLocationConstraintApSoutheast2 = "ap-southeast-2" - - // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApNortheast1 = "ap-northeast-1" - - // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value - BucketLocationConstraintSaEast1 = "sa-east-1" - - // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value - BucketLocationConstraintCnNorth1 = "cn-north-1" - - // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuCentral1 = "eu-central-1" -) - -const ( - // BucketLogsPermissionFullControl is a BucketLogsPermission enum value - BucketLogsPermissionFullControl = "FULL_CONTROL" - - // BucketLogsPermissionRead is a BucketLogsPermission enum value - BucketLogsPermissionRead = "READ" - - // BucketLogsPermissionWrite is a BucketLogsPermission enum value - BucketLogsPermissionWrite = "WRITE" -) - -const ( - // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value - BucketVersioningStatusEnabled = "Enabled" - - // 
BucketVersioningStatusSuspended is a BucketVersioningStatus enum value - BucketVersioningStatusSuspended = "Suspended" -) - -const ( - // CompressionTypeNone is a CompressionType enum value - CompressionTypeNone = "NONE" - - // CompressionTypeGzip is a CompressionType enum value - CompressionTypeGzip = "GZIP" - - // CompressionTypeBzip2 is a CompressionType enum value - CompressionTypeBzip2 = "BZIP2" -) - -const ( - // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value - DeleteMarkerReplicationStatusEnabled = "Enabled" - - // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value - DeleteMarkerReplicationStatusDisabled = "Disabled" -) - -// Requests Amazon S3 to encode the object keys in the response and specifies -// the encoding method to use. An object key may contain any Unicode character; -// however, XML 1.0 parser cannot parse some characters, such as characters -// with an ASCII value from 0 to 10. For characters that are not supported in -// XML 1.0, you can add this parameter to request that Amazon S3 encode the -// keys in the response. -const ( - // EncodingTypeUrl is a EncodingType enum value - EncodingTypeUrl = "url" -) - -// The bucket event for which to send notifications. -const ( - // EventS3ReducedRedundancyLostObject is a Event enum value - EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - - // EventS3ObjectCreated is a Event enum value - EventS3ObjectCreated = "s3:ObjectCreated:*" - - // EventS3ObjectCreatedPut is a Event enum value - EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" - - // EventS3ObjectCreatedPost is a Event enum value - EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" - - // EventS3ObjectCreatedCopy is a Event enum value - EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" - - // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value - EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - - // EventS3ObjectRemoved is a Event enum value - EventS3ObjectRemoved = "s3:ObjectRemoved:*" - - // EventS3ObjectRemovedDelete is a Event enum value - EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - - // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value - EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - - // EventS3ObjectRestore is a Event enum value - EventS3ObjectRestore = "s3:ObjectRestore:*" - - // EventS3ObjectRestorePost is a Event enum value - EventS3ObjectRestorePost = "s3:ObjectRestore:Post" - - // EventS3ObjectRestoreCompleted is a Event enum value - EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" - - // EventS3Replication is a Event enum value - EventS3Replication = "s3:Replication:*" - - // EventS3ReplicationOperationFailedReplication is a Event enum value - EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" - - // EventS3ReplicationOperationNotTracked is a Event enum value - EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" - - // EventS3ReplicationOperationMissedThreshold is a Event enum value - EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" - - // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value - EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" -) - -const ( - // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value - 
ExistingObjectReplicationStatusEnabled = "Enabled" - - // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value - ExistingObjectReplicationStatusDisabled = "Disabled" -) - -const ( - // ExpirationStatusEnabled is a ExpirationStatus enum value - ExpirationStatusEnabled = "Enabled" - - // ExpirationStatusDisabled is a ExpirationStatus enum value - ExpirationStatusDisabled = "Disabled" -) - -const ( - // ExpressionTypeSql is a ExpressionType enum value - ExpressionTypeSql = "SQL" -) - -const ( - // FileHeaderInfoUse is a FileHeaderInfo enum value - FileHeaderInfoUse = "USE" - - // FileHeaderInfoIgnore is a FileHeaderInfo enum value - FileHeaderInfoIgnore = "IGNORE" - - // FileHeaderInfoNone is a FileHeaderInfo enum value - FileHeaderInfoNone = "NONE" -) - -const ( - // FilterRuleNamePrefix is a FilterRuleName enum value - FilterRuleNamePrefix = "prefix" - - // FilterRuleNameSuffix is a FilterRuleName enum value - FilterRuleNameSuffix = "suffix" -) - -const ( - // InventoryFormatCsv is a InventoryFormat enum value - InventoryFormatCsv = "CSV" - - // InventoryFormatOrc is a InventoryFormat enum value - InventoryFormatOrc = "ORC" - - // InventoryFormatParquet is a InventoryFormat enum value - InventoryFormatParquet = "Parquet" -) - -const ( - // InventoryFrequencyDaily is a InventoryFrequency enum value - InventoryFrequencyDaily = "Daily" - - // InventoryFrequencyWeekly is a InventoryFrequency enum value - InventoryFrequencyWeekly = "Weekly" -) - -const ( - // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value - InventoryIncludedObjectVersionsAll = "All" - - // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value - InventoryIncludedObjectVersionsCurrent = "Current" -) - -const ( - // InventoryOptionalFieldSize is a InventoryOptionalField enum value - InventoryOptionalFieldSize = "Size" - - // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value - InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" - - // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value - InventoryOptionalFieldStorageClass = "StorageClass" - - // InventoryOptionalFieldEtag is a InventoryOptionalField enum value - InventoryOptionalFieldEtag = "ETag" - - // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value - InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" - - // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value - InventoryOptionalFieldReplicationStatus = "ReplicationStatus" - - // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value - InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" - - // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" - - // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockMode = "ObjectLockMode" - - // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" - - // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value - InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" -) - -const ( - // JSONTypeDocument is a JSONType enum value - JSONTypeDocument = "DOCUMENT" - - // JSONTypeLines is a 
JSONType enum value - JSONTypeLines = "LINES" -) - -const ( - // MFADeleteEnabled is a MFADelete enum value - MFADeleteEnabled = "Enabled" - - // MFADeleteDisabled is a MFADelete enum value - MFADeleteDisabled = "Disabled" -) - -const ( - // MFADeleteStatusEnabled is a MFADeleteStatus enum value - MFADeleteStatusEnabled = "Enabled" - - // MFADeleteStatusDisabled is a MFADeleteStatus enum value - MFADeleteStatusDisabled = "Disabled" -) - -const ( - // MetadataDirectiveCopy is a MetadataDirective enum value - MetadataDirectiveCopy = "COPY" - - // MetadataDirectiveReplace is a MetadataDirective enum value - MetadataDirectiveReplace = "REPLACE" -) - -const ( - // MetricsStatusEnabled is a MetricsStatus enum value - MetricsStatusEnabled = "Enabled" - - // MetricsStatusDisabled is a MetricsStatus enum value - MetricsStatusDisabled = "Disabled" -) - -const ( - // ObjectCannedACLPrivate is a ObjectCannedACL enum value - ObjectCannedACLPrivate = "private" - - // ObjectCannedACLPublicRead is a ObjectCannedACL enum value - ObjectCannedACLPublicRead = "public-read" - - // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value - ObjectCannedACLPublicReadWrite = "public-read-write" - - // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value - ObjectCannedACLAuthenticatedRead = "authenticated-read" - - // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value - ObjectCannedACLAwsExecRead = "aws-exec-read" - - // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value - ObjectCannedACLBucketOwnerRead = "bucket-owner-read" - - // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value - ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" -) - -const ( - // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value - ObjectLockEnabledEnabled = "Enabled" -) - -const ( - // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value - ObjectLockLegalHoldStatusOn = "ON" - - // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value - ObjectLockLegalHoldStatusOff = "OFF" -) - -const ( - // ObjectLockModeGovernance is a ObjectLockMode enum value - ObjectLockModeGovernance = "GOVERNANCE" - - // ObjectLockModeCompliance is a ObjectLockMode enum value - ObjectLockModeCompliance = "COMPLIANCE" -) - -const ( - // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value - ObjectLockRetentionModeGovernance = "GOVERNANCE" - - // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value - ObjectLockRetentionModeCompliance = "COMPLIANCE" -) - -const ( - // ObjectStorageClassStandard is a ObjectStorageClass enum value - ObjectStorageClassStandard = "STANDARD" - - // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value - ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - - // ObjectStorageClassGlacier is a ObjectStorageClass enum value - ObjectStorageClassGlacier = "GLACIER" - - // ObjectStorageClassStandardIa is a ObjectStorageClass enum value - ObjectStorageClassStandardIa = "STANDARD_IA" - - // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value - ObjectStorageClassOnezoneIa = "ONEZONE_IA" - - // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value - ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value - ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" -) - -const ( - // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value - 
ObjectVersionStorageClassStandard = "STANDARD" -) - -const ( - // OwnerOverrideDestination is a OwnerOverride enum value - OwnerOverrideDestination = "Destination" -) - -const ( - // PayerRequester is a Payer enum value - PayerRequester = "Requester" - - // PayerBucketOwner is a Payer enum value - PayerBucketOwner = "BucketOwner" -) - -const ( - // PermissionFullControl is a Permission enum value - PermissionFullControl = "FULL_CONTROL" - - // PermissionWrite is a Permission enum value - PermissionWrite = "WRITE" - - // PermissionWriteAcp is a Permission enum value - PermissionWriteAcp = "WRITE_ACP" - - // PermissionRead is a Permission enum value - PermissionRead = "READ" - - // PermissionReadAcp is a Permission enum value - PermissionReadAcp = "READ_ACP" -) - -const ( - // ProtocolHttp is a Protocol enum value - ProtocolHttp = "http" - - // ProtocolHttps is a Protocol enum value - ProtocolHttps = "https" -) - -const ( - // QuoteFieldsAlways is a QuoteFields enum value - QuoteFieldsAlways = "ALWAYS" - - // QuoteFieldsAsneeded is a QuoteFields enum value - QuoteFieldsAsneeded = "ASNEEDED" -) - -const ( - // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value - ReplicationRuleStatusEnabled = "Enabled" - - // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value - ReplicationRuleStatusDisabled = "Disabled" -) - -const ( - // ReplicationStatusComplete is a ReplicationStatus enum value - ReplicationStatusComplete = "COMPLETE" - - // ReplicationStatusPending is a ReplicationStatus enum value - ReplicationStatusPending = "PENDING" - - // ReplicationStatusFailed is a ReplicationStatus enum value - ReplicationStatusFailed = "FAILED" - - // ReplicationStatusReplica is a ReplicationStatus enum value - ReplicationStatusReplica = "REPLICA" -) - -const ( - // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value - ReplicationTimeStatusEnabled = "Enabled" - - // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value - ReplicationTimeStatusDisabled = "Disabled" -) - -// If present, indicates that the requester was successfully charged for the -// request. -const ( - // RequestChargedRequester is a RequestCharged enum value - RequestChargedRequester = "requester" -) - -// Confirms that the requester knows that they will be charged for the request. -// Bucket owners need not specify this parameter in their requests. For information -// about downloading objects from requester pays buckets, see Downloading Objects -// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) -// in the Amazon S3 Developer Guide. 
-const ( - // RequestPayerRequester is a RequestPayer enum value - RequestPayerRequester = "requester" -) - -const ( - // RestoreRequestTypeSelect is a RestoreRequestType enum value - RestoreRequestTypeSelect = "SELECT" -) - -const ( - // ServerSideEncryptionAes256 is a ServerSideEncryption enum value - ServerSideEncryptionAes256 = "AES256" - - // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value - ServerSideEncryptionAwsKms = "aws:kms" -) - -const ( - // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value - SseKmsEncryptedObjectsStatusEnabled = "Enabled" - - // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value - SseKmsEncryptedObjectsStatusDisabled = "Disabled" -) - -const ( - // StorageClassStandard is a StorageClass enum value - StorageClassStandard = "STANDARD" - - // StorageClassReducedRedundancy is a StorageClass enum value - StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - - // StorageClassStandardIa is a StorageClass enum value - StorageClassStandardIa = "STANDARD_IA" - - // StorageClassOnezoneIa is a StorageClass enum value - StorageClassOnezoneIa = "ONEZONE_IA" - - // StorageClassIntelligentTiering is a StorageClass enum value - StorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // StorageClassGlacier is a StorageClass enum value - StorageClassGlacier = "GLACIER" - - // StorageClassDeepArchive is a StorageClass enum value - StorageClassDeepArchive = "DEEP_ARCHIVE" -) - -const ( - // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value - StorageClassAnalysisSchemaVersionV1 = "V_1" -) - -const ( - // TaggingDirectiveCopy is a TaggingDirective enum value - TaggingDirectiveCopy = "COPY" - - // TaggingDirectiveReplace is a TaggingDirective enum value - TaggingDirectiveReplace = "REPLACE" -) - -const ( - // TierStandard is a Tier enum value - TierStandard = "Standard" - - // TierBulk is a Tier enum value - TierBulk = "Bulk" - - // TierExpedited is a Tier enum value - TierExpedited = "Expedited" -) - -const ( - // TransitionStorageClassGlacier is a TransitionStorageClass enum value - TransitionStorageClassGlacier = "GLACIER" - - // TransitionStorageClassStandardIa is a TransitionStorageClass enum value - TransitionStorageClassStandardIa = "STANDARD_IA" - - // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value - TransitionStorageClassOnezoneIa = "ONEZONE_IA" - - // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value - TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value - TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" -) - -const ( - // TypeCanonicalUser is a Type enum value - TypeCanonicalUser = "CanonicalUser" - - // TypeAmazonCustomerByEmail is a Type enum value - TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" - - // TypeGroup is a Type enum value - TypeGroup = "Group" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go deleted file mode 100644 index 5c8ce5cc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go +++ /dev/null @@ -1,249 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "fmt" - "hash" - "io" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - 
"github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - contentMD5Header = "Content-Md5" - contentSha256Header = "X-Amz-Content-Sha256" - amzTeHeader = "X-Amz-Te" - amzTxEncodingHeader = "X-Amz-Transfer-Encoding" - - appendMD5TxEncoding = "append-md5" -) - -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - if !aws.IsReaderSeekable(r.Body) { - if r.Config.Logger != nil { - r.Config.Logger.Log(fmt.Sprintf( - "Unable to compute Content-MD5 for unseekable body, S3.%s", - r.Operation.Name)) - } - return - } - - if _, err := copySeekableBody(h, r.Body); err != nil { - r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) - return - } - - // encode the md5 checksum in base64 and set the request header. - v := base64.StdEncoding.EncodeToString(h.Sum(nil)) - r.HTTPRequest.Header.Set(contentMD5Header, v) -} - -// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the -// request. If the body is not seekable or S3DisableContentMD5Validation set -// this handler will be ignored. -func computeBodyHashes(r *request.Request) { - if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { - return - } - if r.IsPresigned() { - return - } - if r.Error != nil || !aws.IsReaderSeekable(r.Body) { - return - } - - var md5Hash, sha256Hash hash.Hash - hashers := make([]io.Writer, 0, 2) - - // Determine upfront which hashes can be set without overriding user - // provide header data. - if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { - md5Hash = md5.New() - hashers = append(hashers, md5Hash) - } - - if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { - sha256Hash = sha256.New() - hashers = append(hashers, sha256Hash) - } - - // Create the destination writer based on the hashes that are not already - // provided by the user. - var dst io.Writer - switch len(hashers) { - case 0: - return - case 1: - dst = hashers[0] - default: - dst = io.MultiWriter(hashers...) - } - - if _, err := copySeekableBody(dst, r.Body); err != nil { - r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) - return - } - - // For the hashes created, set the associated headers that the user did not - // already provide. - if md5Hash != nil { - sum := make([]byte, md5.Size) - encoded := make([]byte, md5Base64EncLen) - - base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) - r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} - } - - if sha256Hash != nil { - encoded := make([]byte, sha256HexEncLen) - sum := make([]byte, sha256.Size) - - hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) - r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} - } -} - -const ( - md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen - sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen -) - -func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} - -// Adds the x-amz-te: append_md5 header to the request. This requests the service -// responds with a trailing MD5 checksum. 
-// -// Will not ask for append MD5 if disabled, the request is presigned or, -// or the API operation does not support content MD5 validation. -func askForTxEncodingAppendMD5(r *request.Request) { - if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { - return - } - if r.IsPresigned() { - return - } - r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) -} - -func useMD5ValidationReader(r *request.Request) { - if r.Error != nil { - return - } - - if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { - return - } - - var bodyReader *io.ReadCloser - var contentLen int64 - switch tv := r.Data.(type) { - case *GetObjectOutput: - bodyReader = &tv.Body - contentLen = aws.Int64Value(tv.ContentLength) - // Update ContentLength hiden the trailing MD5 checksum. - tv.ContentLength = aws.Int64(contentLen - md5.Size) - tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) - default: - r.Error = awserr.New("ChecksumValidationError", - fmt.Sprintf("%s: %s header received on unsupported API, %s", - amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, - ), nil) - return - } - - if contentLen < md5.Size { - r.Error = awserr.New("ChecksumValidationError", - fmt.Sprintf("invalid Content-Length %d for %s %s", - contentLen, appendMD5TxEncoding, amzTxEncodingHeader, - ), nil) - return - } - - // Wrap and swap the response body reader with the validation reader. - *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) -} - -type md5ValidationReader struct { - rawReader io.ReadCloser - payload io.Reader - hash hash.Hash - - payloadLen int64 - read int64 -} - -func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { - h := md5.New() - return &md5ValidationReader{ - rawReader: reader, - payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), - hash: h, - payloadLen: payloadLen, - } -} - -func (v *md5ValidationReader) Read(p []byte) (n int, err error) { - n, err = v.payload.Read(p) - if err != nil && err != io.EOF { - return n, err - } - - v.read += int64(n) - - if err == io.EOF { - if v.read != v.payloadLen { - return n, io.ErrUnexpectedEOF - } - expectSum := make([]byte, md5.Size) - actualSum := make([]byte, md5.Size) - if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { - return n, sumReadErr - } - actualSum = v.hash.Sum(actualSum[0:0]) - if !bytes.Equal(expectSum, actualSum) { - return n, awserr.New("InvalidChecksum", - fmt.Sprintf("expected MD5 checksum %s, got %s", - hex.EncodeToString(expectSum), - hex.EncodeToString(actualSum), - ), - nil) - } - } - - return n, err -} - -func (v *md5ValidationReader) Close() error { - return v.rawReader.Close() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go deleted file mode 100644 index 9ba8a788..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ /dev/null @@ -1,107 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) - -// NormalizeBucketLocation is a utility function which will update the -// passed in value to always be a region ID. Generally this would be used -// with GetBucketLocation API operation. 
-// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -func NormalizeBucketLocation(loc string) string { - switch loc { - case "": - loc = "us-east-1" - case "EU": - loc = "eu-west-1" - } - - return loc -} - -// NormalizeBucketLocationHandler is a request handler which will update the -// GetBucketLocation's result LocationConstraint value to always be a region ID. -// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -// -// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ -// Bucket: aws.String(bucket), -// }) -// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) -// err := req.Send() -var NormalizeBucketLocationHandler = request.NamedHandler{ - Name: "awssdk.s3.NormalizeBucketLocation", - Fn: func(req *request.Request) { - if req.Error != nil { - return - } - - out := req.Data.(*GetBucketLocationOutput) - loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) - out.LocationConstraint = aws.String(loc) - }, -} - -// WithNormalizeBucketLocation is a request option which will update the -// GetBucketLocation's result LocationConstraint value to always be a region ID. -// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -// -// result, err := svc.GetBucketLocationWithContext(ctx, -// &s3.GetBucketLocationInput{ -// Bucket: aws.String(bucket), -// }, -// s3.WithNormalizeBucketLocation, -// ) -func WithNormalizeBucketLocation(r *request.Request) { - r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) -} - -func buildGetBucketLocation(r *request.Request) { - if r.DataFilled() { - out := r.Data.(*GetBucketLocationOutput) - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed reading response body", err) - return - } - - match := reBucketLocation.FindSubmatch(b) - if len(match) > 1 { - loc := string(match[1]) - out.LocationConstraint = aws.String(loc) - } - } -} - -func populateLocationConstraint(r *request.Request) { - if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { - in := r.Params.(*CreateBucketInput) - if in.CreateBucketConfiguration == nil { - r.Params = awsutil.CopyOf(r.Params) - in = r.Params.(*CreateBucketInput) - in.CreateBucketConfiguration = &CreateBucketConfiguration{ - LocationConstraint: r.Config.Region, - } - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go deleted file mode 100644 index 036d0b2e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ /dev/null @@ -1,81 +0,0 @@ -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/s3err" - "github.com/aws/aws-sdk-go/service/s3/internal/arn" -) - -func init() { - initClient = defaultInitClientFn - initRequest = defaultInitRequestFn -} - -func defaultInitClientFn(c *client.Client) { - // Support building custom endpoints based on config - 
c.Handlers.Build.PushFront(endpointHandler) - - // Require SSL when using SSE keys - c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeyMD5) - c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) - - // S3 uses custom error unmarshaling logic - c.Handlers.UnmarshalError.Clear() - c.Handlers.UnmarshalError.PushBack(unmarshalError) - c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) -} - -func defaultInitRequestFn(r *request.Request) { - // Add request handlers for specific platforms. - // e.g. 100-continue support for PUT requests using Go 1.6 - platformRequestHandlers(r) - - switch r.Operation.Name { - case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, - opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, - opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, - opPutBucketReplication: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) - case opGetBucketLocation: - // GetBucketLocation has custom parsing logic - r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) - case opCreateBucket: - // Auto-populate LocationConstraint with current region - r.Handlers.Validate.PushFront(populateLocationConstraint) - case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: - r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) - r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) - case opPutObject, opUploadPart: - r.Handlers.Build.PushBack(computeBodyHashes) - // Disabled until #1837 root issue is resolved. - // case opGetObject: - // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) - // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) - } -} - -// bucketGetter is an accessor interface to grab the "Bucket" field from -// an S3 type. -type bucketGetter interface { - getBucket() string -} - -// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" -// field from an S3 type. -type sseCustomerKeyGetter interface { - getSSECustomerKey() string -} - -// copySourceSSECustomerKeyGetter is an accessor interface to grab the -// "CopySourceSSECustomerKey" field from an S3 type. -type copySourceSSECustomerKeyGetter interface { - getCopySourceSSECustomerKey() string -} - -type endpointARNGetter interface { - getEndpointARN() (arn.Resource, error) - hasEndpointARN() bool -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go deleted file mode 100644 index 0def0225..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package s3 provides the client and types for making API -// requests to Amazon Simple Storage Service. -// -// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. -// -// See s3 package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ -// -// Using the Client -// -// To contact Amazon Simple Storage Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. 
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon Simple Storage Service client S3 for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New -package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go deleted file mode 100644 index 4b65f715..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ /dev/null @@ -1,123 +0,0 @@ -// Upload Managers -// -// The s3manager package's Uploader provides concurrent upload of content to S3 -// by taking advantage of S3's Multipart APIs. The Uploader also supports both -// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker -// for optimizations if the Body satisfies that type. Once the Uploader instance -// is created you can call Upload concurrently from multiple goroutines safely. -// -// // The session the S3 Uploader will use -// sess := session.Must(session.NewSession()) -// -// // Create an uploader with the session and default options -// uploader := s3manager.NewUploader(sess) -// -// f, err := os.Open(filename) -// if err != nil { -// return fmt.Errorf("failed to open file %q, %v", filename, err) -// } -// -// // Upload the file to S3. -// result, err := uploader.Upload(&s3manager.UploadInput{ -// Bucket: aws.String(myBucket), -// Key: aws.String(myString), -// Body: f, -// }) -// if err != nil { -// return fmt.Errorf("failed to upload file, %v", err) -// } -// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) -// -// See the s3manager package's Uploader type documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader -// -// Download Manager -// -// The s3manager package's Downloader provides concurrently downloading of Objects -// from S3. The Downloader will write S3 Object content with an io.WriterAt. -// Once the Downloader instance is created you can call Download concurrently from -// multiple goroutines safely. -// -// // The session the S3 Downloader will use -// sess := session.Must(session.NewSession()) -// -// // Create a downloader with the session and default options -// downloader := s3manager.NewDownloader(sess) -// -// // Create a file to write the S3 Object contents to. -// f, err := os.Create(filename) -// if err != nil { -// return fmt.Errorf("failed to create file %q, %v", filename, err) -// } -// -// // Write the contents of S3 Object to the file -// n, err := downloader.Download(f, &s3.GetObjectInput{ -// Bucket: aws.String(myBucket), -// Key: aws.String(myString), -// }) -// if err != nil { -// return fmt.Errorf("failed to download file, %v", err) -// } -// fmt.Printf("file downloaded, %d bytes\n", n) -// -// See the s3manager package's Downloader type documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader -// -// Automatic URI cleaning -// -// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) -// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct -// used by the service client. 
-// -// svc := s3.New(sess, &aws.Config{ -// DisableRestProtocolURICleaning: aws.Bool(true), -// }) -// out, err := svc.GetObject(&s3.GetObjectInput { -// Bucket: aws.String("bucketname"), -// Key: aws.String("//foo//bar//moo"), -// }) -// -// Get Bucket Region -// -// GetBucketRegion will attempt to get the region for a bucket using a region -// hint to determine which AWS partition to perform the query on. Use this utility -// to determine the region a bucket is in. -// -// sess := session.Must(session.NewSession()) -// -// bucket := "my-bucket" -// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") -// if err != nil { -// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { -// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket) -// } -// return err -// } -// fmt.Printf("Bucket %s is in %s region\n", bucket, region) -// -// See the s3manager package's GetBucketRegion function documentation for more information -// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion -// -// S3 Crypto Client -// -// The s3crypto package provides the tools to upload and download encrypted -// content from S3. The Encryption and Decryption clients can be used concurrently -// once the client is created. -// -// sess := session.Must(session.NewSession()) -// -// // Create the decryption client. -// svc := s3crypto.NewDecryptionClient(sess) -// -// // The object will be downloaded from S3 and decrypted locally. By metadata -// // about the object's encryption will instruct the decryption client how -// // decrypt the content of the object. By default KMS is used for keys. -// result, err := svc.GetObject(&s3.GetObjectInput { -// Bucket: aws.String(myBucket), -// Key: aws.String(myKey), -// }) -// -// See the s3crypto package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ -// -package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go deleted file mode 100644 index c4048fbf..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go +++ /dev/null @@ -1,233 +0,0 @@ -package s3 - -import ( - "net/url" - "strings" - - "github.com/aws/aws-sdk-go/aws" - awsarn "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/service/s3/internal/arn" -) - -// Used by shapes with members decorated as endpoint ARN. 
-func parseEndpointARN(v string) (arn.Resource, error) { - return arn.ParseResource(v, accessPointResourceParser) -} - -func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { - resParts := arn.SplitResource(a.Resource) - switch resParts[0] { - case "accesspoint": - return arn.ParseAccessPointResource(a, resParts[1:]) - default: - return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} - } -} - -func endpointHandler(req *request.Request) { - endpoint, ok := req.Params.(endpointARNGetter) - if !ok || !endpoint.hasEndpointARN() { - updateBucketEndpointFromParams(req) - return - } - - resource, err := endpoint.getEndpointARN() - if err != nil { - req.Error = newInvalidARNError(nil, err) - return - } - - resReq := resourceRequest{ - Resource: resource, - Request: req, - } - - if resReq.IsCrossPartition() { - req.Error = newClientPartitionMismatchError(resource, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - return - } - - if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { - req.Error = newClientRegionMismatchError(resource, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - return - } - - if resReq.HasCustomEndpoint() { - req.Error = newInvalidARNWithCustomEndpointError(resource, nil) - return - } - - switch tv := resource.(type) { - case arn.AccessPointARN: - err = updateRequestAccessPointEndpoint(req, tv) - if err != nil { - req.Error = err - } - default: - req.Error = newInvalidARNError(resource, nil) - } -} - -type resourceRequest struct { - Resource arn.Resource - Request *request.Request -} - -func (r resourceRequest) ARN() awsarn.ARN { - return r.Resource.GetARN() -} - -func (r resourceRequest) AllowCrossRegion() bool { - return aws.BoolValue(r.Request.Config.S3UseARNRegion) -} - -func (r resourceRequest) UseFIPS() bool { - return isFIPS(aws.StringValue(r.Request.Config.Region)) -} - -func (r resourceRequest) IsCrossPartition() bool { - return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition -} - -func (r resourceRequest) IsCrossRegion() bool { - return isCrossRegion(r.Request, r.Resource.GetARN().Region) -} - -func (r resourceRequest) HasCustomEndpoint() bool { - return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 -} - -func isFIPS(clientRegion string) bool { - return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") -} -func isCrossRegion(req *request.Request, otherRegion string) bool { - return req.ClientInfo.SigningRegion != otherRegion -} - -func updateBucketEndpointFromParams(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucket name was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - updateEndpointForS3Config(r, bucket) -} - -func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { - // Accelerate not supported - if aws.BoolValue(req.Config.S3UseAccelerate) { - return newClientConfiguredForAccelerateError(accessPoint, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - } - - // Ignore the disable host prefix for access points since custom endpoints - // are not supported. 
- req.Config.DisableEndpointHostPrefix = aws.Bool(false) - - if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil { - return err - } - - removeBucketFromPath(req.HTTPRequest.URL) - - return nil -} - -func removeBucketFromPath(u *url.URL) { - u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) - if u.Path == "" { - u.Path = "/" - } -} - -type accessPointEndpointBuilder arn.AccessPointARN - -const ( - accessPointPrefixLabel = "accesspoint" - accountIDPrefixLabel = "accountID" - accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." -) - -func (a accessPointEndpointBuilder) Build(req *request.Request) error { - resolveRegion := arn.AccessPointARN(a).Region - cfgRegion := aws.StringValue(req.Config.Region) - - if isFIPS(cfgRegion) { - if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) { - // FIPS with cross region is not supported, the SDK must fail - // because there is no well defined method for SDK to construct a - // correct FIPS endpoint. - return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, nil) - } - resolveRegion = cfgRegion - } - - endpoint, err := resolveRegionalEndpoint(req, resolveRegion) - if err != nil { - return newFailedToResolveEndpointError(arn.AccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, err) - } - - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { - return err - } - - const serviceEndpointLabel = "s3-accesspoint" - - // dualstack provided by endpoint resolver - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, "s3") { - req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] - } - - protocol.HostPrefixBuilder{ - Prefix: accesPointPrefixTemplate, - LabelsFn: a.hostPrefixLabelValues, - }.Build(req) - - req.ClientInfo.SigningName = endpoint.SigningName - req.ClientInfo.SigningRegion = endpoint.SigningRegion - - err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) - if err != nil { - return newInvalidARNError(arn.AccessPointARN(a), err) - } - - return nil -} - -func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { - return map[string]string{ - accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, - accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, - } -} - -func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) { - return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) { - opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) - opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) - opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint - }) -} - -func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { - endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) - - r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to parse endpoint URL", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go deleted file mode 100644 index 9df03e78..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go +++ /dev/null @@ -1,151 +0,0 @@ -package s3 - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/service/s3/internal/arn" -) - -const ( - invalidARNErrorErrCode = "InvalidARNError" - configurationErrorErrCode = "ConfigurationError" -) - -type invalidARNError struct { - message string - resource arn.Resource - origErr error -} - -func (e invalidARNError) Error() string { - var extra string - if e.resource != nil { - extra = "ARN: " + e.resource.String() - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) -} - -func (e invalidARNError) Code() string { - return invalidARNErrorErrCode -} - -func (e invalidARNError) Message() string { - return e.message -} - -func (e invalidARNError) OrigErr() error { - return e.origErr -} - -func newInvalidARNError(resource arn.Resource, err error) invalidARNError { - return invalidARNError{ - message: "invalid ARN", - origErr: err, - resource: resource, - } -} - -func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError { - return invalidARNError{ - message: "resource ARN not supported with custom client endpoints", - origErr: err, - resource: resource, - } -} - -// ARN not supported for the target partition -func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError { - return invalidARNError{ - message: "resource ARN not supported for the target ARN partition", - origErr: err, - resource: resource, - } -} - -type configurationError struct { - message string - resource arn.Resource - clientPartitionID string - clientRegion string - origErr error -} - -func (e configurationError) Error() string { - extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", - e.resource, e.clientPartitionID, e.clientRegion) - - return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) -} - -func (e configurationError) Code() string { - return configurationErrorErrCode -} - -func (e configurationError) Message() string { - return e.message -} - -func (e configurationError) OrigErr() error { - return e.origErr -} - -func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client partition does not match provided ARN partition", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client region does not match provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "endpoint resolver failed to find an endpoint for the provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client configured for fips but cross-region resource ARN provided", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) 
configurationError { - return configurationError{ - message: "client configured for S3 Accelerate but is supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go deleted file mode 100644 index 49aeff16..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3 - -const ( - - // ErrCodeBucketAlreadyExists for service response error code - // "BucketAlreadyExists". - // - // The requested bucket name is not available. The bucket namespace is shared - // by all users of the system. Please select a different name and try again. - ErrCodeBucketAlreadyExists = "BucketAlreadyExists" - - // ErrCodeBucketAlreadyOwnedByYou for service response error code - // "BucketAlreadyOwnedByYou". - // - // The bucket you tried to create already exists, and you own it. Amazon S3 - // returns this error in all AWS Regions except in the North Virginia Region. - // For legacy compatibility, if you re-create an existing bucket that you already - // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the - // bucket access control lists (ACLs). - ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" - - // ErrCodeNoSuchBucket for service response error code - // "NoSuchBucket". - // - // The specified bucket does not exist. - ErrCodeNoSuchBucket = "NoSuchBucket" - - // ErrCodeNoSuchKey for service response error code - // "NoSuchKey". - // - // The specified key does not exist. - ErrCodeNoSuchKey = "NoSuchKey" - - // ErrCodeNoSuchUpload for service response error code - // "NoSuchUpload". - // - // The specified multipart upload does not exist. - ErrCodeNoSuchUpload = "NoSuchUpload" - - // ErrCodeObjectAlreadyInActiveTierError for service response error code - // "ObjectAlreadyInActiveTierError". - // - // This operation is not allowed against this storage tier. - ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" - - // ErrCodeObjectNotInActiveTierError for service response error code - // "ObjectNotInActiveTierError". - // - // The source object of the COPY operation is not in the active tier and is - // only stored in Amazon S3 Glacier. - ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go deleted file mode 100644 index 81cdec1a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ /dev/null @@ -1,136 +0,0 @@ -package s3 - -import ( - "fmt" - "net/url" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// an operationBlacklist is a list of operation names that should a -// request handler should not be executed with. 
-type operationBlacklist []string - -// Continue will return true of the Request's operation name is not -// in the blacklist. False otherwise. -func (b operationBlacklist) Continue(r *request.Request) bool { - for i := 0; i < len(b); i++ { - if b[i] == r.Operation.Name { - return false - } - } - return true -} - -var accelerateOpBlacklist = operationBlacklist{ - opListBuckets, opCreateBucket, opDeleteBucket, -} - -// Automatically add the bucket name to the endpoint domain -// if possible. This style of bucket is valid for all bucket names which are -// DNS compatible and do not contain "." -func updateEndpointForS3Config(r *request.Request, bucketName string) { - forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) - accelerate := aws.BoolValue(r.Config.S3UseAccelerate) - - if accelerate && accelerateOpBlacklist.Continue(r) { - if forceHostStyle { - if r.Config.Logger != nil { - r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") - } - } - updateEndpointForAccelerate(r, bucketName) - } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { - updateEndpointForHostStyle(r, bucketName) - } -} - -func updateEndpointForHostStyle(r *request.Request, bucketName string) { - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { - // bucket name must be valid to put into the host - return - } - - moveBucketToHost(r.HTTPRequest.URL, bucketName) -} - -var ( - accelElem = []byte("s3-accelerate.dualstack.") -) - -func updateEndpointForAccelerate(r *request.Request, bucketName string) { - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { - r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), - nil) - return - } - - parts := strings.Split(r.HTTPRequest.URL.Host, ".") - if len(parts) < 3 { - r.Error = awserr.New("InvalidParameterExecption", - fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", - r.HTTPRequest.URL.Host), nil) - return - } - - if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { - parts[0] = "s3-accelerate" - } - for i := 1; i+1 < len(parts); i++ { - if parts[i] == aws.StringValue(r.Config.Region) { - parts = append(parts[:i], parts[i+1:]...) - break - } - } - - r.HTTPRequest.URL.Host = strings.Join(parts, ".") - - moveBucketToHost(r.HTTPRequest.URL, bucketName) -} - -// Attempts to retrieve the bucket name from the request input parameters. -// If no bucket is found, or the field is empty "", false will be returned. -func bucketNameFromReqParams(params interface{}) (string, bool) { - if iface, ok := params.(bucketGetter); ok { - b := iface.getBucket() - return b, len(b) > 0 - } - - return "", false -} - -// hostCompatibleBucketName returns true if the request should -// put the bucket in the host. This is false if S3ForcePathStyle is -// explicitly set or if the bucket is not DNS compatible. -func hostCompatibleBucketName(u *url.URL, bucket string) bool { - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. - if u.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) -} - -var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) -var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) - -// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. 
-// Buckets created outside of the classic region MUST be DNS compatible. -func dnsCompatibleBucketName(bucket string) bool { - return reDomain.MatchString(bucket) && - !reIPAddress.MatchString(bucket) && - !strings.Contains(bucket, "..") -} - -// moveBucketToHost moves the bucket name from the URI path to URL host. -func moveBucketToHost(u *url.URL, bucket string) { - u.Host = bucket + "." + u.Host - removeBucketFromPath(u) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go deleted file mode 100644 index 2f93f96f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go +++ /dev/null @@ -1,45 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -// AccessPointARN provides representation -type AccessPointARN struct { - arn.ARN - AccessPointName string -} - -// GetARN returns the base ARN for the Access Point resource -func (a AccessPointARN) GetARN() arn.ARN { - return a.ARN -} - -// ParseAccessPointResource attempts to parse the ARN's resource as an -// AccessPoint resource. -func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { - if len(a.Region) == 0 { - return AccessPointARN{}, InvalidARNError{a, "region not set"} - } - if len(a.AccountID) == 0 { - return AccessPointARN{}, InvalidARNError{a, "account-id not set"} - } - if len(resParts) == 0 { - return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} - } - if len(resParts) > 1 { - return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"} - } - - resID := resParts[0] - if len(strings.TrimSpace(resID)) == 0 { - return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} - } - - return AccessPointARN{ - ARN: a, - AccessPointName: resID, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go deleted file mode 100644 index a942d887..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go +++ /dev/null @@ -1,71 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -// Resource provides the interfaces abstracting ARNs of specific resource -// types. -type Resource interface { - GetARN() arn.ARN - String() string -} - -// ResourceParser provides the function for parsing an ARN's resource -// component into a typed resource. -type ResourceParser func(arn.ARN) (Resource, error) - -// ParseResource parses an AWS ARN into a typed resource for the S3 API. -func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { - a, err := arn.Parse(s) - if err != nil { - return nil, err - } - - if len(a.Partition) == 0 { - return nil, InvalidARNError{a, "partition not set"} - } - if a.Service != "s3" { - return nil, InvalidARNError{a, "service is not S3"} - } - if len(a.Resource) == 0 { - return nil, InvalidARNError{a, "resource not set"} - } - - return resParser(a) -} - -// SplitResource splits the resource components by the ARN resource delimiters. 
-func SplitResource(v string) []string { - var parts []string - var offset int - - for offset <= len(v) { - idx := strings.IndexAny(v[offset:], "/:") - if idx < 0 { - parts = append(parts, v[offset:]) - break - } - parts = append(parts, v[offset:idx+offset]) - offset += idx + 1 - } - - return parts -} - -// IsARN returns whether the given string is an ARN -func IsARN(s string) bool { - return arn.IsARN(s) -} - -// InvalidARNError provides the error for an invalid ARN error. -type InvalidARNError struct { - ARN arn.ARN - Reason string -} - -func (e InvalidARNError) Error() string { - return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go deleted file mode 100644 index 8e6f3307..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !go1.6 - -package s3 - -import "github.com/aws/aws-sdk-go/aws/request" - -func platformRequestHandlers(r *request.Request) { -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go deleted file mode 100644 index 14d05f7b..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build go1.6 - -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -func platformRequestHandlers(r *request.Request) { - if r.Operation.HTTPMethod == "PUT" { - // 100-Continue should only be used on put requests. - r.Handlers.Sign.PushBack(add100Continue) - } -} - -func add100Continue(r *request.Request) { - if aws.BoolValue(r.Config.S3Disable100Continue) { - return - } - if r.HTTPRequest.ContentLength < 1024*1024*2 { - // Ignore requests smaller than 2MB. This helps prevent delaying - // requests unnecessarily. - return - } - - r.HTTPRequest.Header.Set("Expect", "100-Continue") -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go deleted file mode 100644 index 2646a427..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package s3iface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" -) - -// S3API provides an interface to enable mocking the -// s3.S3 service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // Amazon Simple Storage Service. 
-// func myFunc(svc s3iface.S3API) bool { -// // Make svc.AbortMultipartUpload request -// } -// -// func main() { -// sess := session.New() -// svc := s3.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockS3Client struct { -// s3iface.S3API -// } -// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockS3Client{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type S3API interface { - AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) - AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error) - AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) - - CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) - CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error) - CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) - - CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) - CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error) - CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) - - CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) - CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error) - CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) - - CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) - CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error) - CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) - - DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) - DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error) - DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) - - DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) - DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) - DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) - - DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) - DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error) - 
DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) - - DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) - DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) - DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) - - DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) - DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) - DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) - - DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) - DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) - DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) - - DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) - DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) - DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) - - DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) - DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error) - DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) - - DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) - DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error) - DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) - - DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) - DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error) - DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) - - DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) - DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) - DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) - - DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) - DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error) - DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) - - DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) - DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, 
...request.Option) (*s3.DeleteObjectTaggingOutput, error) - DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) - - DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) - DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error) - DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) - - DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) - DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) - DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) - - GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) - GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) - GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) - - GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) - GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error) - GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) - - GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) - GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) - GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) - - GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) - GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error) - GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) - - GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) - GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) - GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) - - GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) - GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) - GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) - - GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) - GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error) - GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) - - GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) - 
GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) - GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) - - GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) - GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error) - GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) - - GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) - GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error) - GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) - - GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) - GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) - GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) - - GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) - GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error) - GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) - - GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) - GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error) - GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) - - GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) - GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error) - GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) - - GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) - GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) - GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) - - GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) - GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error) - GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) - - GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) - GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) - GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) 
(*request.Request, *s3.GetBucketRequestPaymentOutput) - - GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) - GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error) - GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) - - GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) - GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error) - GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) - - GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) - GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error) - GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) - - GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) - GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error) - GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) - - GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) - GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error) - GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) - - GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) - GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error) - GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) - - GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) - GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) - GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) - - GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) - GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error) - GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) - - GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) - GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error) - GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) - - GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) - GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error) - GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) - - GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) - GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error) - GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) - - 
HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) - HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error) - HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) - - HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) - HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error) - HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) - - ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) - ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) - ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) - - ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) - ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) - ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) - - ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) - ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) - ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) - - ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) - ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error) - ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) - - ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) - ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error) - ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) - - ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error - ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error - - ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) - ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error) - ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) - - ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error - ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error - - ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) - ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error) - 
ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) - - ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error - ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error - - ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) - ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error) - ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) - - ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error - ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error - - ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) - ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error) - ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) - - ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error - ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error - - PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) - PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) - PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) - - PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) - PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error) - PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) - - PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) - PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) - PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) - - PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) - PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error) - PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) - - PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) - PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) - PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) - - PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) - PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) - PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) - - PutBucketLifecycle(*s3.PutBucketLifecycleInput) 
(*s3.PutBucketLifecycleOutput, error) - PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error) - PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) - - PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) - PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) - PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) - - PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) - PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error) - PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) - - PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) - PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) - PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) - - PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) - PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error) - PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) - - PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) - PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) - PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) - - PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) - PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error) - PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) - - PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) - PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error) - PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) - - PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) - PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) - PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) - - PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) - PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error) - 
PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) - - PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) - PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error) - PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) - - PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) - PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error) - PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) - - PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) - PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error) - PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) - - PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) - PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error) - PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) - - PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) - PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error) - PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) - - PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) - PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) - PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) - - PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) - PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error) - PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) - - PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) - PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error) - PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) - - PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) - PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error) - PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) - - RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) - RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error) - RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) - - SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) - SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error) - SelectObjectContentRequest(*s3.SelectObjectContentInput) 
(*request.Request, *s3.SelectObjectContentOutput) - - UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) - UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) - UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) - - UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) - UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error) - UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) - - WaitUntilBucketExists(*s3.HeadBucketInput) error - WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error - - WaitUntilBucketNotExists(*s3.HeadBucketInput) error - WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error - - WaitUntilObjectExists(*s3.HeadObjectInput) error - WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error - - WaitUntilObjectNotExists(*s3.HeadObjectInput) error - WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error -} - -var _ S3API = (*s3.S3)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go deleted file mode 100644 index 22bd0b7c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go +++ /dev/null @@ -1,529 +0,0 @@ -package s3manager - -import ( - "bytes" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" -) - -const ( - // DefaultBatchSize is the batch size we initialize when constructing a batch delete client. - // This value is used when calling DeleteObjects. This represents how many objects to delete - // per DeleteObjects call. - DefaultBatchSize = 100 -) - -// BatchError will contain the key and bucket of the object that failed to -// either upload or download. -type BatchError struct { - Errors Errors - code string - message string -} - -// Errors is a typed alias for a slice of errors to satisfy the error -// interface. -type Errors []Error - -func (errs Errors) Error() string { - buf := bytes.NewBuffer(nil) - for i, err := range errs { - buf.WriteString(err.Error()) - if i+1 < len(errs) { - buf.WriteString("\n") - } - } - return buf.String() -} - -// Error will contain the original error, bucket, and key of the operation that failed -// during batch operations. -type Error struct { - OrigErr error - Bucket *string - Key *string -} - -func newError(err error, bucket, key *string) Error { - return Error{ - err, - bucket, - key, - } -} - -func (err *Error) Error() string { - origErr := "" - if err.OrigErr != nil { - origErr = ":\n" + err.OrigErr.Error() - } - return fmt.Sprintf("failed to perform batch operation on %q to %q%s", - aws.StringValue(err.Key), - aws.StringValue(err.Bucket), - origErr, - ) -} - -// NewBatchError will return a BatchError that satisfies the awserr.Error interface. -func NewBatchError(code, message string, err []Error) awserr.Error { - return &BatchError{ - Errors: err, - code: code, - message: message, - } -} - -// Code will return the code associated with the batch error. 
-func (err *BatchError) Code() string { - return err.code -} - -// Message will return the message associated with the batch error. -func (err *BatchError) Message() string { - return err.message -} - -func (err *BatchError) Error() string { - return awserr.SprintError(err.Code(), err.Message(), "", err.Errors) -} - -// OrigErr will return the original error. Which, in this case, will always be nil -// for batched operations. -func (err *BatchError) OrigErr() error { - return err.Errors -} - -// BatchDeleteIterator is an interface that uses the scanner pattern to -// iterate through what needs to be deleted. -type BatchDeleteIterator interface { - Next() bool - Err() error - DeleteObject() BatchDeleteObject -} - -// DeleteListIterator is an alternative iterator for the BatchDelete client. This will -// iterate through a list of objects and delete the objects. -// -// Example: -// iter := &s3manager.DeleteListIterator{ -// Client: svc, -// Input: &s3.ListObjectsInput{ -// Bucket: aws.String("bucket"), -// MaxKeys: aws.Int64(5), -// }, -// Paginator: request.Pagination{ -// NewRequest: func() (*request.Request, error) { -// var inCpy *ListObjectsInput -// if input != nil { -// tmp := *input -// inCpy = &tmp -// } -// req, _ := c.ListObjectsRequest(inCpy) -// return req, nil -// }, -// }, -// } -// -// batcher := s3manager.NewBatchDeleteWithClient(svc) -// if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil { -// return err -// } -type DeleteListIterator struct { - Bucket *string - Paginator request.Pagination - objects []*s3.Object -} - -// NewDeleteListIterator will return a new DeleteListIterator. -func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator { - iter := &DeleteListIterator{ - Bucket: input.Bucket, - Paginator: request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *s3.ListObjectsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := svc.ListObjectsRequest(inCpy) - return req, nil - }, - }, - } - - for _, opt := range opts { - opt(iter) - } - return iter -} - -// Next will use the S3API client to iterate through a list of objects. -func (iter *DeleteListIterator) Next() bool { - if len(iter.objects) > 0 { - iter.objects = iter.objects[1:] - } - - if len(iter.objects) == 0 && iter.Paginator.Next() { - iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents - } - - return len(iter.objects) > 0 -} - -// Err will return the last known error from Next. -func (iter *DeleteListIterator) Err() error { - return iter.Paginator.Err() -} - -// DeleteObject will return the current object to be deleted. -func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject { - return BatchDeleteObject{ - Object: &s3.DeleteObjectInput{ - Bucket: iter.Bucket, - Key: iter.objects[0].Key, - }, - } -} - -// BatchDelete will use the s3 package's service client to perform a batch -// delete. -type BatchDelete struct { - Client s3iface.S3API - BatchSize int -} - -// NewBatchDeleteWithClient will return a new delete client that can delete a batched amount of -// objects. 
-// -// Example: -// batcher := s3manager.NewBatchDeleteWithClient(client, size) -// -// objects := []BatchDeleteObject{ -// { -// Object: &s3.DeleteObjectInput { -// Key: aws.String("key"), -// Bucket: aws.String("bucket"), -// }, -// }, -// } -// -// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{ -// Objects: objects, -// }); err != nil { -// return err -// } -func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete { - svc := &BatchDelete{ - Client: client, - BatchSize: DefaultBatchSize, - } - - for _, opt := range options { - opt(svc) - } - - return svc -} - -// NewBatchDelete will return a new delete client that can delete a batched amount of -// objects. -// -// Example: -// batcher := s3manager.NewBatchDelete(sess, size) -// -// objects := []BatchDeleteObject{ -// { -// Object: &s3.DeleteObjectInput { -// Key: aws.String("key"), -// Bucket: aws.String("bucket"), -// }, -// }, -// } -// -// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{ -// Objects: objects, -// }); err != nil { -// return err -// } -func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete { - client := s3.New(c) - return NewBatchDeleteWithClient(client, options...) -} - -// BatchDeleteObject is a wrapper object for calling the batch delete operation. -type BatchDeleteObject struct { - Object *s3.DeleteObjectInput - // After will run after each iteration during the batch process. This function will - // be executed whether or not the request was successful. - After func() error -} - -// DeleteObjectsIterator is an interface that uses the scanner pattern to iterate -// through a series of objects to be deleted. -type DeleteObjectsIterator struct { - Objects []BatchDeleteObject - index int - inc bool -} - -// Next will increment the default iterator's index and ensure that there -// is another object to iterator to. -func (iter *DeleteObjectsIterator) Next() bool { - if iter.inc { - iter.index++ - } else { - iter.inc = true - } - return iter.index < len(iter.Objects) -} - -// Err will return an error. Since this is just used to satisfy the BatchDeleteIterator interface -// this will only return nil. -func (iter *DeleteObjectsIterator) Err() error { - return nil -} - -// DeleteObject will return the BatchDeleteObject at the current batched index. -func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject { - object := iter.Objects[iter.index] - return object -} - -// Delete will use the iterator to queue up objects that need to be deleted. -// Once the batch size is met, this will call the deleteBatch function. -func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error { - var errs []Error - objects := []BatchDeleteObject{} - var input *s3.DeleteObjectsInput - - for iter.Next() { - o := iter.DeleteObject() - - if input == nil { - input = initDeleteObjectsInput(o.Object) - } - - parity := hasParity(input, o) - if parity { - input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{ - Key: o.Object.Key, - VersionId: o.Object.VersionId, - }) - objects = append(objects, o) - } - - if len(input.Delete.Objects) == d.BatchSize || !parity { - if err := deleteBatch(ctx, d, input, objects); err != nil { - errs = append(errs, err...) 
- } - - objects = objects[:0] - input = nil - - if !parity { - objects = append(objects, o) - input = initDeleteObjectsInput(o.Object) - input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{ - Key: o.Object.Key, - VersionId: o.Object.VersionId, - }) - } - } - } - - // iter.Next() could return false (above) plus populate iter.Err() - if iter.Err() != nil { - errs = append(errs, newError(iter.Err(), nil, nil)) - } - - if input != nil && len(input.Delete.Objects) > 0 { - if err := deleteBatch(ctx, d, input, objects); err != nil { - errs = append(errs, err...) - } - } - - if len(errs) > 0 { - return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs) - } - return nil -} - -func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput { - return &s3.DeleteObjectsInput{ - Bucket: o.Bucket, - MFA: o.MFA, - RequestPayer: o.RequestPayer, - Delete: &s3.Delete{}, - } -} - -const ( - // ErrDeleteBatchFailCode represents an error code which will be returned - // only when DeleteObjects.Errors has an error that does not contain a code. - ErrDeleteBatchFailCode = "DeleteBatchError" - errDefaultDeleteBatchMessage = "failed to delete" -) - -// deleteBatch will delete a batch of items in the objects parameters. -func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error { - errs := []Error{} - - if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil { - for i := 0; i < len(input.Delete.Objects); i++ { - errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key)) - } - } else if len(result.Errors) > 0 { - for i := 0; i < len(result.Errors); i++ { - code := ErrDeleteBatchFailCode - msg := errDefaultDeleteBatchMessage - if result.Errors[i].Message != nil { - msg = *result.Errors[i].Message - } - if result.Errors[i].Code != nil { - code = *result.Errors[i].Code - } - - errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key)) - } - } - for _, object := range objects { - if object.After == nil { - continue - } - if err := object.After(); err != nil { - errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key)) - } - } - - return errs -} - -func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool { - if o1.Bucket != nil && o2.Object.Bucket != nil { - if *o1.Bucket != *o2.Object.Bucket { - return false - } - } else if o1.Bucket != o2.Object.Bucket { - return false - } - - if o1.MFA != nil && o2.Object.MFA != nil { - if *o1.MFA != *o2.Object.MFA { - return false - } - } else if o1.MFA != o2.Object.MFA { - return false - } - - if o1.RequestPayer != nil && o2.Object.RequestPayer != nil { - if *o1.RequestPayer != *o2.Object.RequestPayer { - return false - } - } else if o1.RequestPayer != o2.Object.RequestPayer { - return false - } - - return true -} - -// BatchDownloadIterator is an interface that uses the scanner pattern to iterate -// through a series of objects to be downloaded. -type BatchDownloadIterator interface { - Next() bool - Err() error - DownloadObject() BatchDownloadObject -} - -// BatchDownloadObject contains all necessary information to run a batch operation once. -type BatchDownloadObject struct { - Object *s3.GetObjectInput - Writer io.WriterAt - // After will run after each iteration during the batch process. This function will - // be executed whether or not the request was successful. 
- After func() error -} - -// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched -// download of objects. -type DownloadObjectsIterator struct { - Objects []BatchDownloadObject - index int - inc bool -} - -// Next will increment the default iterator's index and ensure that there -// is another object to iterator to. -func (batcher *DownloadObjectsIterator) Next() bool { - if batcher.inc { - batcher.index++ - } else { - batcher.inc = true - } - return batcher.index < len(batcher.Objects) -} - -// DownloadObject will return the BatchDownloadObject at the current batched index. -func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject { - object := batcher.Objects[batcher.index] - return object -} - -// Err will return an error. Since this is just used to satisfy the BatchDeleteIterator interface -// this will only return nil. -func (batcher *DownloadObjectsIterator) Err() error { - return nil -} - -// BatchUploadIterator is an interface that uses the scanner pattern to -// iterate through what needs to be uploaded. -type BatchUploadIterator interface { - Next() bool - Err() error - UploadObject() BatchUploadObject -} - -// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched -// upload of objects. -type UploadObjectsIterator struct { - Objects []BatchUploadObject - index int - inc bool -} - -// Next will increment the default iterator's index and ensure that there -// is another object to iterator to. -func (batcher *UploadObjectsIterator) Next() bool { - if batcher.inc { - batcher.index++ - } else { - batcher.inc = true - } - return batcher.index < len(batcher.Objects) -} - -// Err will return an error. Since this is just used to satisfy the BatchUploadIterator interface -// this will only return nil. -func (batcher *UploadObjectsIterator) Err() error { - return nil -} - -// UploadObject will return the BatchUploadObject at the current batched index. -func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject { - object := batcher.Objects[batcher.index] - return object -} - -// BatchUploadObject contains all necessary information to run a batch operation once. -type BatchUploadObject struct { - Object *UploadInput - // After will run after each iteration during the batch process. This function will - // be executed whether or not the request was successful. - After func() error -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go deleted file mode 100644 index f61665a5..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go +++ /dev/null @@ -1,88 +0,0 @@ -package s3manager - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" -) - -// GetBucketRegion will attempt to get the region for a bucket using the -// regionHint to determine which AWS partition to perform the query on. -// -// The request will not be signed, and will not use your AWS credentials. -// -// A "NotFound" error code will be returned if the bucket does not exist in the -// AWS partition the regionHint belongs to. If the regionHint parameter is an -// empty string GetBucketRegion will fallback to the ConfigProvider's region -// config. 
If the regionHint is empty, and the ConfigProvider does not have a -// region value, an error will be returned.. -// -// For example to get the region of a bucket which exists in "eu-central-1" -// you could provide a region hint of "us-west-2". -// -// sess := session.Must(session.NewSession()) -// -// bucket := "my-bucket" -// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") -// if err != nil { -// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { -// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket) -// } -// return err -// } -// fmt.Printf("Bucket %s is in %s region\n", bucket, region) -// -func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) { - var cfg aws.Config - if len(regionHint) != 0 { - cfg.Region = aws.String(regionHint) - } - svc := s3.New(c, &cfg) - return GetBucketRegionWithClient(ctx, svc, bucket, opts...) -} - -const bucketRegionHeader = "X-Amz-Bucket-Region" - -// GetBucketRegionWithClient is the same as GetBucketRegion with the exception -// that it takes a S3 service client instead of a Session. The regionHint is -// derived from the region the S3 service client was created in. -// -// See GetBucketRegion for more information. -func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) { - req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{ - Bucket: aws.String(bucket), - }) - req.Config.S3ForcePathStyle = aws.Bool(true) - req.Config.Credentials = credentials.AnonymousCredentials - req.SetContext(ctx) - - // Disable HTTP redirects to prevent an invalid 301 from eating the response - // because Go's HTTP client will fail, and drop the response if an 301 is - // received without a location header. S3 will return a 301 without the - // location header for HeadObject API calls. - req.DisableFollowRedirects = true - - var bucketRegion string - req.Handlers.Send.PushBack(func(r *request.Request) { - bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader) - if len(bucketRegion) == 0 { - return - } - r.HTTPResponse.StatusCode = 200 - r.HTTPResponse.Status = "OK" - r.Error = nil - }) - - req.ApplyOptions(opts...) - - if err := req.Send(); err != nil { - return "", err - } - - bucketRegion = s3.NormalizeBucketLocation(bucketRegion) - - return bucketRegion, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go deleted file mode 100644 index f1d9e85c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go +++ /dev/null @@ -1,81 +0,0 @@ -package s3manager - -import ( - "io" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// BufferedReadSeeker is buffered io.ReadSeeker -type BufferedReadSeeker struct { - r io.ReadSeeker - buffer []byte - readIdx, writeIdx int -} - -// NewBufferedReadSeeker returns a new BufferedReadSeeker -// if len(b) == 0 then the buffer will be initialized to 64 KiB. -func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker { - if len(b) == 0 { - b = make([]byte, 64*1024) - } - return &BufferedReadSeeker{r: r, buffer: b} -} - -func (b *BufferedReadSeeker) reset(r io.ReadSeeker) { - b.r = r - b.readIdx, b.writeIdx = 0, 0 -} - -// Read will read up len(p) bytes into p and will return -// the number of bytes read and any error that occurred. 
-// If the len(p) > the buffer size then a single read request -// will be issued to the underlying io.ReadSeeker for len(p) bytes. -// A Read request will at most perform a single Read to the underlying -// io.ReadSeeker, and may return < len(p) if serviced from the buffer. -func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return n, err - } - - if b.readIdx == b.writeIdx { - if len(p) >= len(b.buffer) { - n, err = b.r.Read(p) - return n, err - } - b.readIdx, b.writeIdx = 0, 0 - - n, err = b.r.Read(b.buffer) - if n == 0 { - return n, err - } - - b.writeIdx += n - } - - n = copy(p, b.buffer[b.readIdx:b.writeIdx]) - b.readIdx += n - - return n, err -} - -// Seek will position then underlying io.ReadSeeker to the given offset -// and will clear the buffer. -func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { - n, err := b.r.Seek(offset, whence) - - b.reset(b.r) - - return n, err -} - -// ReadAt will read up to len(p) bytes at the given file offset. -// This will result in the buffer being cleared. -func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { - _, err := b.Seek(off, sdkio.SeekStart) - if err != nil { - return 0, err - } - - return b.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go deleted file mode 100644 index 42276530..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package s3manager - -func defaultUploadBufferProvider() ReadSeekerWriteToProvider { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go deleted file mode 100644 index 687082c3..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package s3manager - -func defaultUploadBufferProvider() ReadSeekerWriteToProvider { - return NewBufferedReadSeekerWriteToPool(1024 * 1024) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go deleted file mode 100644 index ada50c24..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package s3manager - -func defaultDownloadBufferProvider() WriterReadFromProvider { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go deleted file mode 100644 index 7e9d9579..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package s3manager - -func defaultDownloadBufferProvider() WriterReadFromProvider { - return NewPooledBufferedWriterReadFromProvider(1024 * 1024) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go deleted file mode 100644 index 229c0d63..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package s3manager 
provides utilities to upload and download objects from -// S3 concurrently. Helpful for when working with large objects. -package s3manager diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go deleted file mode 100644 index 4b54b7c0..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go +++ /dev/null @@ -1,597 +0,0 @@ -package s3manager - -import ( - "fmt" - "io" - "net/http" - "strconv" - "strings" - "sync" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" -) - -// DefaultDownloadPartSize is the default range of bytes to get at a time when -// using Download(). -const DefaultDownloadPartSize = 1024 * 1024 * 5 - -// DefaultDownloadConcurrency is the default number of goroutines to spin up -// when using Download(). -const DefaultDownloadConcurrency = 5 - -type errReadingBody struct { - err error -} - -func (e *errReadingBody) Error() string { - return fmt.Sprintf("failed to read part body: %v", e.err) -} - -func (e *errReadingBody) Unwrap() error { - return e.err -} - -// The Downloader structure that calls Download(). It is safe to call Download() -// on this structure for multiple objects and across concurrent goroutines. -// Mutating the Downloader's properties is not safe to be done concurrently. -type Downloader struct { - // The size (in bytes) to request from S3 for each part. - // The minimum allowed part size is 5MB, and if this value is set to zero, - // the DefaultDownloadPartSize value will be used. - // - // PartSize is ignored if the Range input parameter is provided. - PartSize int64 - - // The number of goroutines to spin up in parallel when sending parts. - // If this is set to zero, the DefaultDownloadConcurrency value will be used. - // - // Concurrency of 1 will download the parts sequentially. - // - // Concurrency is ignored if the Range input parameter is provided. - Concurrency int - - // An S3 client to use when performing downloads. - S3 s3iface.S3API - - // List of request options that will be passed down to individual API - // operation requests made by the downloader. - RequestOptions []request.Option - - // Defines the buffer strategy used when downloading a part. - // - // If a WriterReadFromProvider is given the Download manager - // will pass the io.WriterAt of the Download request to the provider - // and will use the returned WriterReadFrom from the provider as the - // destination writer when copying from http response body. - BufferProvider WriterReadFromProvider -} - -// WithDownloaderRequestOptions appends to the Downloader's API request options. -func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) { - return func(d *Downloader) { - d.RequestOptions = append(d.RequestOptions, opts...) - } -} - -// NewDownloader creates a new Downloader instance to downloads objects from -// S3 in concurrent chunks. Pass in additional functional options to customize -// the downloader behavior. Requires a client.ConfigProvider in order to create -// a S3 service client. The session.Session satisfies the client.ConfigProvider -// interface. 
-// -// Example: -// // The session the S3 Downloader will use -// sess := session.Must(session.NewSession()) -// -// // Create a downloader with the session and default options -// downloader := s3manager.NewDownloader(sess) -// -// // Create a downloader with the session and custom options -// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) { -// d.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { - return newDownloader(s3.New(c), options...) -} - -func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader { - d := &Downloader{ - S3: client, - PartSize: DefaultDownloadPartSize, - Concurrency: DefaultDownloadConcurrency, - BufferProvider: defaultDownloadBufferProvider(), - } - for _, option := range options { - option(d) - } - - return d -} - -// NewDownloaderWithClient creates a new Downloader instance to downloads -// objects from S3 in concurrent chunks. Pass in additional functional -// options to customize the downloader behavior. Requires a S3 service client -// to make S3 API calls. -// -// Example: -// // The session the S3 Downloader will use -// sess := session.Must(session.NewSession()) -// -// // The S3 client the S3 Downloader will use -// s3Svc := s3.New(sess) -// -// // Create a downloader with the s3 client and default options -// downloader := s3manager.NewDownloaderWithClient(s3Svc) -// -// // Create a downloader with the s3 client and custom options -// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) { -// d.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { - return newDownloader(svc, options...) -} - -type maxRetrier interface { - MaxRetries() int -} - -// Download downloads an object in S3 and writes the payload into w using -// concurrent GET requests. The n int64 returned is the size of the object downloaded -// in bytes. -// -// Additional functional options can be provided to configure the individual -// download. These options are copies of the Downloader instance Download is called from. -// Modifying the options will not impact the original Downloader instance. -// -// It is safe to call this method concurrently across goroutines. -// -// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent -// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. -// -// Specifying a Downloader.Concurrency of 1 will cause the Downloader to -// download the parts from S3 sequentially. -// -// If the GetObjectInput's Range value is provided that will cause the downloader -// to perform a single GetObjectInput request for that object's range. This will -// caused the part size, and concurrency configurations to be ignored. -func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { - return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...) -} - -// DownloadWithContext downloads an object in S3 and writes the payload into w -// using concurrent GET requests. The n int64 returned is the size of the object downloaded -// in bytes. -// -// DownloadWithContext is the same as Download with the additional support for -// Context input parameters. The Context must not be nil. A nil Context will -// cause a panic. Use the Context to add deadlining, timeouts, etc. 
The -// DownloadWithContext may create sub-contexts for individual underlying -// requests. -// -// Additional functional options can be provided to configure the individual -// download. These options are copies of the Downloader instance Download is -// called from. Modifying the options will not impact the original Downloader -// instance. Use the WithDownloaderRequestOptions helper function to pass in request -// options that will be applied to all API operations made with this downloader. -// -// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent -// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. -// -// Specifying a Downloader.Concurrency of 1 will cause the Downloader to -// download the parts from S3 sequentially. -// -// It is safe to call this method concurrently across goroutines. -// -// If the GetObjectInput's Range value is provided that will cause the downloader -// to perform a single GetObjectInput request for that object's range. This will -// caused the part size, and concurrency configurations to be ignored. -func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { - impl := downloader{w: w, in: input, cfg: d, ctx: ctx} - - for _, option := range options { - option(&impl.cfg) - } - impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager")) - - if s, ok := d.S3.(maxRetrier); ok { - impl.partBodyMaxRetries = s.MaxRetries() - } - - impl.totalBytes = -1 - if impl.cfg.Concurrency == 0 { - impl.cfg.Concurrency = DefaultDownloadConcurrency - } - - if impl.cfg.PartSize == 0 { - impl.cfg.PartSize = DefaultDownloadPartSize - } - - return impl.download() -} - -// DownloadWithIterator will download a batched amount of objects in S3 and writes them -// to the io.WriterAt specificed in the iterator. -// -// Example: -// svc := s3manager.NewDownloader(session) -// -// fooFile, err := os.Open("/tmp/foo.file") -// if err != nil { -// return err -// } -// -// barFile, err := os.Open("/tmp/bar.file") -// if err != nil { -// return err -// } -// -// objects := []s3manager.BatchDownloadObject { -// { -// Object: &s3.GetObjectInput { -// Bucket: aws.String("bucket"), -// Key: aws.String("foo"), -// }, -// Writer: fooFile, -// }, -// { -// Object: &s3.GetObjectInput { -// Bucket: aws.String("bucket"), -// Key: aws.String("bar"), -// }, -// Writer: barFile, -// }, -// } -// -// iter := &s3manager.DownloadObjectsIterator{Objects: objects} -// if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil { -// return err -// } -func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error { - var errs []Error - for iter.Next() { - object := iter.DownloadObject() - if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil { - errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key)) - } - - if object.After == nil { - continue - } - - if err := object.After(); err != nil { - errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key)) - } - } - - if len(errs) > 0 { - return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs) - } - return nil -} - -// downloader is the implementation structure used internally by Downloader. 
-type downloader struct { - ctx aws.Context - cfg Downloader - - in *s3.GetObjectInput - w io.WriterAt - - wg sync.WaitGroup - m sync.Mutex - - pos int64 - totalBytes int64 - written int64 - err error - - partBodyMaxRetries int -} - -// download performs the implementation of the object download across ranged -// GETs. -func (d *downloader) download() (n int64, err error) { - // If range is specified fall back to single download of that range - // this enables the functionality of ranged gets with the downloader but - // at the cost of no multipart downloads. - if rng := aws.StringValue(d.in.Range); len(rng) > 0 { - d.downloadRange(rng) - return d.written, d.err - } - - // Spin off first worker to check additional header information - d.getChunk() - - if total := d.getTotalBytes(); total >= 0 { - // Spin up workers - ch := make(chan dlchunk, d.cfg.Concurrency) - - for i := 0; i < d.cfg.Concurrency; i++ { - d.wg.Add(1) - go d.downloadPart(ch) - } - - // Assign work - for d.getErr() == nil { - if d.pos >= total { - break // We're finished queuing chunks - } - - // Queue the next range of bytes to read. - ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} - d.pos += d.cfg.PartSize - } - - // Wait for completion - close(ch) - d.wg.Wait() - } else { - // Checking if we read anything new - for d.err == nil { - d.getChunk() - } - - // We expect a 416 error letting us know we are done downloading the - // total bytes. Since we do not know the content's length, this will - // keep grabbing chunks of data until the range of bytes specified in - // the request is out of range of the content. Once, this happens, a - // 416 should occur. - e, ok := d.err.(awserr.RequestFailure) - if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable { - d.err = nil - } - } - - // Return error - return d.written, d.err -} - -// downloadPart is an individual goroutine worker reading from the ch channel -// and performing a GetObject request on the data with a given byte range. -// -// If this is the first worker, this operation also resolves the total number -// of bytes to be read so that the worker manager knows when it is finished. -func (d *downloader) downloadPart(ch chan dlchunk) { - defer d.wg.Done() - for { - chunk, ok := <-ch - if !ok { - break - } - if d.getErr() != nil { - // Drain the channel if there is an error, to prevent deadlocking - // of download producer. - continue - } - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } - } -} - -// getChunk grabs a chunk of data from the body. -// Not thread safe. Should only used when grabbing data on a single thread. -func (d *downloader) getChunk() { - if d.getErr() != nil { - return - } - - chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} - d.pos += d.cfg.PartSize - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } -} - -// downloadRange downloads an Object given the passed in Byte-Range value. -// The chunk used down download the range will be configured for that range. -func (d *downloader) downloadRange(rng string) { - if d.getErr() != nil { - return - } - - chunk := dlchunk{w: d.w, start: d.pos} - // Ranges specified will short circuit the multipart download - chunk.withRange = rng - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } - - // Update the position based on the amount of data received. 
- d.pos = d.written -} - -// downloadChunk downloads the chunk from s3 -func (d *downloader) downloadChunk(chunk dlchunk) error { - in := &s3.GetObjectInput{} - awsutil.Copy(in, d.in) - - // Get the next byte range of data - in.Range = aws.String(chunk.ByteRange()) - - var n int64 - var err error - for retry := 0; retry <= d.partBodyMaxRetries; retry++ { - n, err = d.tryDownloadChunk(in, &chunk) - if err == nil { - break - } - // Check if the returned error is an errReadingBody. - // If err is errReadingBody this indicates that an error - // occurred while copying the http response body. - // If this occurs we unwrap the err to set the underlying error - // and attempt any remaining retries. - if bodyErr, ok := err.(*errReadingBody); ok { - err = bodyErr.Unwrap() - } else { - return err - } - - chunk.cur = 0 - logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries, - fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d", - aws.StringValue(in.Key), err, retry)) - } - - d.incrWritten(n) - - return err -} - -func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) { - cleanup := func() {} - if d.cfg.BufferProvider != nil { - w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) - } - defer cleanup() - - resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) - if err != nil { - return 0, err - } - d.setTotalBytes(resp) // Set total if not yet set. - - n, err := io.Copy(w, resp.Body) - resp.Body.Close() - if err != nil { - return n, &errReadingBody{err: err} - } - - return n, nil -} - -func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) { - s, ok := svc.(*s3.S3) - if !ok { - return - } - - if s.Config.Logger == nil { - return - } - - if s.Config.LogLevel.Matches(level) { - s.Config.Logger.Log(msg) - } -} - -// getTotalBytes is a thread-safe getter for retrieving the total byte status. -func (d *downloader) getTotalBytes() int64 { - d.m.Lock() - defer d.m.Unlock() - - return d.totalBytes -} - -// setTotalBytes is a thread-safe setter for setting the total byte status. -// Will extract the object's total bytes from the Content-Range if the file -// will be chunked, or Content-Length. Content-Length is used when the response -// does not include a Content-Range. Meaning the object was not chunked. This -// occurs when the full file fits within the PartSize directive. -func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { - d.m.Lock() - defer d.m.Unlock() - - if d.totalBytes >= 0 { - return - } - - if resp.ContentRange == nil { - // ContentRange is nil when the full file contents is provided, and - // is not chunked. Use ContentLength instead. 
- if resp.ContentLength != nil { - d.totalBytes = *resp.ContentLength - return - } - } else { - parts := strings.Split(*resp.ContentRange, "/") - - total := int64(-1) - var err error - // Checking for whether or not a numbered total exists - // If one does not exist, we will assume the total to be -1, undefined, - // and sequentially download each chunk until hitting a 416 error - totalStr := parts[len(parts)-1] - if totalStr != "*" { - total, err = strconv.ParseInt(totalStr, 10, 64) - if err != nil { - d.err = err - return - } - } - - d.totalBytes = total - } -} - -func (d *downloader) incrWritten(n int64) { - d.m.Lock() - defer d.m.Unlock() - - d.written += n -} - -// getErr is a thread-safe getter for the error object -func (d *downloader) getErr() error { - d.m.Lock() - defer d.m.Unlock() - - return d.err -} - -// setErr is a thread-safe setter for the error object -func (d *downloader) setErr(e error) { - d.m.Lock() - defer d.m.Unlock() - - d.err = e -} - -// dlchunk represents a single chunk of data to write by the worker routine. -// This structure also implements an io.SectionReader style interface for -// io.WriterAt, effectively making it an io.SectionWriter (which does not -// exist). -type dlchunk struct { - w io.WriterAt - start int64 - size int64 - cur int64 - - // specifies the byte range the chunk should be downloaded with. - withRange string -} - -// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start -// position to its end (or EOF). -// -// If a range is specified on the dlchunk the size will be ignored when writing. -// as the total size may not of be known ahead of time. -func (c *dlchunk) Write(p []byte) (n int, err error) { - if c.cur >= c.size && len(c.withRange) == 0 { - return 0, io.EOF - } - - n, err = c.w.WriteAt(p, c.start+c.cur) - c.cur += int64(n) - - return -} - -// ByteRange returns a HTTP Byte-Range header value that should be used by the -// client to request the chunk's range. -func (c *dlchunk) ByteRange() string { - if len(c.withRange) != 0 { - return c.withRange - } - - return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go deleted file mode 100644 index f62e1a45..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go +++ /dev/null @@ -1,65 +0,0 @@ -package s3manager - -import ( - "io" - "sync" -) - -// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker -type ReadSeekerWriteTo interface { - io.ReadSeeker - io.WriterTo -} - -// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt -// implementation. -type BufferedReadSeekerWriteTo struct { - *BufferedReadSeeker -} - -// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or -// an error occurs. Returns the number of bytes written and any error encountered during the write. 
-func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) { - return io.Copy(writer, b.BufferedReadSeeker) -} - -// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker -type ReadSeekerWriteToProvider interface { - GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) -} - -// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse -// []byte slices for buffering parts in memory -type BufferedReadSeekerWriteToPool struct { - pool sync.Pool -} - -// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create -// a pool of reusable buffers . If size is less then < 64 KiB then the buffer -// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom -// respectively will default to copying 32 KiB. -func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool { - if size < 65536 { - size = 65536 - } - - return &BufferedReadSeekerWriteToPool{ - pool: sync.Pool{New: func() interface{} { - return make([]byte, size) - }}, - } -} - -// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo. -// The provided cleanup must be called after operations have been completed on the -// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool. -func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) { - buffer := p.pool.Get().([]byte) - - r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)} - cleanup = func() { - p.pool.Put(buffer) - } - - return r, cleanup -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go deleted file mode 100644 index 201b3e32..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go +++ /dev/null @@ -1,801 +0,0 @@ -package s3manager - -import ( - "bytes" - "fmt" - "io" - "sort" - "sync" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" -) - -// MaxUploadParts is the maximum allowed number of parts in a multi-part upload -// on Amazon S3. -const MaxUploadParts = 10000 - -// MinUploadPartSize is the minimum allowed part size when uploading a part to -// Amazon S3. -const MinUploadPartSize int64 = 1024 * 1024 * 5 - -// DefaultUploadPartSize is the default part size to buffer chunks of a -// payload into. -const DefaultUploadPartSize = MinUploadPartSize - -// DefaultUploadConcurrency is the default number of goroutines to spin up when -// using Upload(). -const DefaultUploadConcurrency = 5 - -// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned -// will satisfy this interface when a multi part upload failed to upload all -// chucks to S3. In the case of a failure the UploadID is needed to operate on -// the chunks, if any, which were uploaded. 
-// -// Example: -// -// u := s3manager.NewUploader(opts) -// output, err := u.upload(input) -// if err != nil { -// if multierr, ok := err.(s3manager.MultiUploadFailure); ok { -// // Process error and its associated uploadID -// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID()) -// } else { -// // Process error generically -// fmt.Println("Error:", err.Error()) -// } -// } -// -type MultiUploadFailure interface { - awserr.Error - - // Returns the upload id for the S3 multipart upload that failed. - UploadID() string -} - -// So that the Error interface type can be included as an anonymous field -// in the multiUploadError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A multiUploadError wraps the upload ID of a failed s3 multipart upload. -// Composed of BaseError for code, message, and original error -// -// Should be used for an error that occurred failing a S3 multipart upload, -// and a upload ID is available. If an uploadID is not available a more relevant -type multiUploadError struct { - awsError - - // ID for multipart upload which failed. - uploadID string -} - -// Error returns the string representation of the error. -// -// See apierr.BaseError ErrorWithExtra for output format -// -// Satisfies the error interface. -func (m multiUploadError) Error() string { - extra := fmt.Sprintf("upload id: %s", m.uploadID) - return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (m multiUploadError) String() string { - return m.Error() -} - -// UploadID returns the id of the S3 upload which failed. -func (m multiUploadError) UploadID() string { - return m.uploadID -} - -// UploadOutput represents a response from the Upload() call. -type UploadOutput struct { - // The URL where the object was uploaded to. - Location string - - // The version of the object that was uploaded. Will only be populated if - // the S3 Bucket is versioned. If the bucket is not versioned this field - // will not be set. - VersionID *string - - // The ID for a multipart upload to S3. In the case of an error the error - // can be cast to the MultiUploadFailure interface to extract the upload ID. - UploadID string -} - -// WithUploaderRequestOptions appends to the Uploader's API request options. -func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) { - return func(u *Uploader) { - u.RequestOptions = append(u.RequestOptions, opts...) - } -} - -// The Uploader structure that calls Upload(). It is safe to call Upload() -// on this structure for multiple objects and across concurrent goroutines. -// Mutating the Uploader's properties is not safe to be done concurrently. -type Uploader struct { - // The buffer size (in bytes) to use when buffering data into chunks and - // sending them as parts to S3. The minimum allowed part size is 5MB, and - // if this value is set to zero, the DefaultUploadPartSize value will be used. - PartSize int64 - - // The number of goroutines to spin up in parallel per call to Upload when - // sending parts. If this is set to zero, the DefaultUploadConcurrency value - // will be used. - // - // The concurrency pool is not shared between calls to Upload. - Concurrency int - - // Setting this value to true will cause the SDK to avoid calling - // AbortMultipartUpload on a failure, leaving all successfully uploaded - // parts on S3 for manual recovery. 
- // - // Note that storing parts of an incomplete multipart upload counts towards - // space usage on S3 and will add additional costs if not cleaned up. - LeavePartsOnError bool - - // MaxUploadParts is the max number of parts which will be uploaded to S3. - // Will be used to calculate the partsize of the object to be uploaded. - // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file - // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). - // - // MaxUploadParts must not be used to limit the total number of bytes uploaded. - // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) - // instead. An io.LimitReader is helpful when uploading an unbounded reader - // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned - // error must be used to signal end of stream. - // - // Defaults to package const's MaxUploadParts value. - MaxUploadParts int - - // The client to use when uploading to S3. - S3 s3iface.S3API - - // List of request options that will be passed down to individual API - // operation requests made by the uploader. - RequestOptions []request.Option - - // Defines the buffer strategy used when uploading a part - BufferProvider ReadSeekerWriteToProvider - - // partPool allows for the re-usage of streaming payload part buffers between upload calls - partPool byteSlicePool -} - -// NewUploader creates a new Uploader instance to upload objects to S3. Pass In -// additional functional options to customize the uploader's behavior. Requires a -// client.ConfigProvider in order to create a S3 service client. The session.Session -// satisfies the client.ConfigProvider interface. -// -// Example: -// // The session the S3 Uploader will use -// sess := session.Must(session.NewSession()) -// -// // Create an uploader with the session and default options -// uploader := s3manager.NewUploader(sess) -// -// // Create an uploader with the session and custom options -// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) { -// u.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader { - return newUploader(s3.New(c), options...) -} - -func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader { - u := &Uploader{ - S3: client, - PartSize: DefaultUploadPartSize, - Concurrency: DefaultUploadConcurrency, - LeavePartsOnError: false, - MaxUploadParts: MaxUploadParts, - BufferProvider: defaultUploadBufferProvider(), - } - - for _, option := range options { - option(u) - } - - u.partPool = newByteSlicePool(u.PartSize) - - return u -} - -// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in -// additional functional options to customize the uploader's behavior. Requires -// a S3 service client to make S3 API calls. -// -// Example: -// // The session the S3 Uploader will use -// sess := session.Must(session.NewSession()) -// -// // S3 service client the Upload manager will use. -// s3Svc := s3.New(sess) -// -// // Create an uploader with S3 client and default options -// uploader := s3manager.NewUploaderWithClient(s3Svc) -// -// // Create an uploader with S3 client and custom options -// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) { -// u.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader { - return newUploader(svc, options...) 
-} - -// Upload uploads an object to S3, intelligently buffering large files into -// smaller chunks and sending them in parallel across multiple goroutines. You -// can configure the buffer size and concurrency through the Uploader's parameters. -// -// Additional functional options can be provided to configure the individual -// upload. These options are copies of the Uploader instance Upload is called from. -// Modifying the options will not impact the original Uploader instance. -// -// Use the WithUploaderRequestOptions helper function to pass in request -// options that will be applied to all API operations made with this uploader. -// -// It is safe to call this method concurrently across goroutines. -// -// Example: -// // Upload input parameters -// upParams := &s3manager.UploadInput{ -// Bucket: &bucketName, -// Key: &keyName, -// Body: file, -// } -// -// // Perform an upload. -// result, err := uploader.Upload(upParams) -// -// // Perform upload with options different than the those in the Uploader. -// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) { -// u.PartSize = 10 * 1024 * 1024 // 10MB part size -// u.LeavePartsOnError = true // Don't delete the parts if the upload fails. -// }) -func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) { - return u.UploadWithContext(aws.BackgroundContext(), input, options...) -} - -// UploadWithContext uploads an object to S3, intelligently buffering large -// files into smaller chunks and sending them in parallel across multiple -// goroutines. You can configure the buffer size and concurrency through the -// Uploader's parameters. -// -// UploadWithContext is the same as Upload with the additional support for -// Context input parameters. The Context must not be nil. A nil Context will -// cause a panic. Use the context to add deadlining, timeouts, etc. The -// UploadWithContext may create sub-contexts for individual underlying requests. -// -// Additional functional options can be provided to configure the individual -// upload. These options are copies of the Uploader instance Upload is called from. -// Modifying the options will not impact the original Uploader instance. -// -// Use the WithUploaderRequestOptions helper function to pass in request -// options that will be applied to all API operations made with this uploader. -// -// It is safe to call this method concurrently across goroutines. -func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) { - i := uploader{in: input, cfg: u, ctx: ctx} - - for _, opt := range opts { - opt(&i.cfg) - } - - i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager")) - - return i.upload() -} - -// UploadWithIterator will upload a batched amount of objects to S3. This operation uses -// the iterator pattern to know which object to upload next. Since this is an interface this -// allows for custom defined functionality. 
-// -// Example: -// svc:= s3manager.NewUploader(sess) -// -// objects := []BatchUploadObject{ -// { -// Object: &s3manager.UploadInput { -// Key: aws.String("key"), -// Bucket: aws.String("bucket"), -// }, -// }, -// } -// -// iter := &s3manager.UploadObjectsIterator{Objects: objects} -// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil { -// return err -// } -func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error { - var errs []Error - for iter.Next() { - object := iter.UploadObject() - if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil { - s3Err := Error{ - OrigErr: err, - Bucket: object.Object.Bucket, - Key: object.Object.Key, - } - - errs = append(errs, s3Err) - } - - if object.After == nil { - continue - } - - if err := object.After(); err != nil { - s3Err := Error{ - OrigErr: err, - Bucket: object.Object.Bucket, - Key: object.Object.Key, - } - - errs = append(errs, s3Err) - } - } - - if len(errs) > 0 { - return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs) - } - return nil -} - -// internal structure to manage an upload to S3. -type uploader struct { - ctx aws.Context - cfg Uploader - - in *UploadInput - - readerPos int64 // current reader position - totalSize int64 // set to -1 if the size is not known -} - -// internal logic for deciding whether to upload a single part or use a -// multipart upload. -func (u *uploader) upload() (*UploadOutput, error) { - if err := u.init(); err != nil { - return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err) - } - - if u.cfg.PartSize < MinUploadPartSize { - msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) - return nil, awserr.New("ConfigError", msg, nil) - } - - // Do one read to determine if we have more than one part - reader, _, cleanup, err := u.nextReader() - if err == io.EOF { // single part - return u.singlePart(reader, cleanup) - } else if err != nil { - cleanup() - return nil, awserr.New("ReadRequestBody", "read upload data failed", err) - } - - mu := multiuploader{uploader: u} - return mu.upload(reader, cleanup) -} - -// init will initialize all default options. -func (u *uploader) init() error { - if u.cfg.Concurrency == 0 { - u.cfg.Concurrency = DefaultUploadConcurrency - } - if u.cfg.PartSize == 0 { - u.cfg.PartSize = DefaultUploadPartSize - } - if u.cfg.MaxUploadParts == 0 { - u.cfg.MaxUploadParts = MaxUploadParts - } - - // Try to get the total size for some optimizations - if err := u.initSize(); err != nil { - return err - } - - // If PartSize was changed or partPool was never setup then we need to allocated a new pool - // so that we return []byte slices of the correct size - if u.cfg.partPool == nil || u.cfg.partPool.Size() != u.cfg.PartSize { - u.cfg.partPool = newByteSlicePool(u.cfg.PartSize) - } - - return nil -} - -// initSize tries to detect the total stream size, setting u.totalSize. If -// the size is not known, totalSize is set to -1. -func (u *uploader) initSize() error { - u.totalSize = -1 - - switch r := u.in.Body.(type) { - case io.Seeker: - n, err := aws.SeekerLen(r) - if err != nil { - return err - } - u.totalSize = n - - // Try to adjust partSize if it is too small and account for - // integer division truncation. - if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) { - // Add one to the part size to account for remainders - // during the size calculation. e.g odd number of bytes. 
- u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 - } - } - - return nil -} - -// nextReader returns a seekable reader representing the next packet of data. -// This operation increases the shared u.readerPos counter, but note that it -// does not need to be wrapped in a mutex because nextReader is only called -// from the main thread. -func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) { - type readerAtSeeker interface { - io.ReaderAt - io.ReadSeeker - } - switch r := u.in.Body.(type) { - case readerAtSeeker: - var err error - - n := u.cfg.PartSize - if u.totalSize >= 0 { - bytesLeft := u.totalSize - u.readerPos - - if bytesLeft <= u.cfg.PartSize { - err = io.EOF - n = bytesLeft - } - } - - var ( - reader io.ReadSeeker - cleanup func() - ) - - reader = io.NewSectionReader(r, u.readerPos, n) - if u.cfg.BufferProvider != nil { - reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader) - } else { - cleanup = func() {} - } - - u.readerPos += n - - return reader, int(n), cleanup, err - - default: - part := u.cfg.partPool.Get() - n, err := readFillBuf(r, part) - u.readerPos += int64(n) - - cleanup := func() { - u.cfg.partPool.Put(part) - } - - return bytes.NewReader(part[0:n]), n, cleanup, err - } -} - -func readFillBuf(r io.Reader, b []byte) (offset int, err error) { - for offset < len(b) && err == nil { - var n int - n, err = r.Read(b[offset:]) - offset += n - } - - return offset, err -} - -// singlePart contains upload logic for uploading a single chunk via -// a regular PutObject request. Multipart requests require at least two -// parts, or at least 5MB of data. -func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) { - defer cleanup() - - params := &s3.PutObjectInput{} - awsutil.Copy(params, u.in) - params.Body = r - - // Need to use request form because URL generated in request is - // used in return. - req, out := u.cfg.S3.PutObjectRequest(params) - req.SetContext(u.ctx) - req.ApplyOptions(u.cfg.RequestOptions...) - if err := req.Send(); err != nil { - return nil, err - } - - url := req.HTTPRequest.URL.String() - return &UploadOutput{ - Location: url, - VersionID: out.VersionId, - }, nil -} - -// internal structure to manage a specific multipart upload to S3. -type multiuploader struct { - *uploader - wg sync.WaitGroup - m sync.Mutex - err error - uploadID string - parts completedParts -} - -// keeps track of a single chunk of data being sent to S3. -type chunk struct { - buf io.ReadSeeker - num int64 - cleanup func() -} - -// completedParts is a wrapper to make parts sortable by their part number, -// since S3 required this list to be sent in sorted order. -type completedParts []*s3.CompletedPart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } - -// upload will perform a multipart upload using the firstBuf buffer containing -// the first chunk of data. -func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { - params := &s3.CreateMultipartUploadInput{} - awsutil.Copy(params, u.in) - - // Create the multipart - resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) 
- if err != nil { - cleanup() - return nil, err - } - u.uploadID = *resp.UploadId - - // Create the workers - ch := make(chan chunk, u.cfg.Concurrency) - for i := 0; i < u.cfg.Concurrency; i++ { - u.wg.Add(1) - go u.readChunk(ch) - } - - // Send part 1 to the workers - var num int64 = 1 - ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup} - - // Read and queue the rest of the parts - for u.geterr() == nil && err == nil { - var ( - reader io.ReadSeeker - nextChunkLen int - ok bool - ) - - reader, nextChunkLen, cleanup, err = u.nextReader() - ok, err = u.shouldContinue(num, nextChunkLen, err) - if !ok { - cleanup() - if err != nil { - u.seterr(err) - } - break - } - - num++ - - ch <- chunk{buf: reader, num: num, cleanup: cleanup} - } - - // Close the channel, wait for workers, and complete upload - close(ch) - u.wg.Wait() - complete := u.complete() - - if err := u.geterr(); err != nil { - return nil, &multiUploadError{ - awsError: awserr.New( - "MultipartUpload", - "upload multipart failed", - err), - uploadID: u.uploadID, - } - } - - // Create a presigned URL of the S3 Get Object in order to have parity with - // single part upload. - getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - }) - getReq.Config.Credentials = credentials.AnonymousCredentials - uploadLocation, _, _ := getReq.PresignRequest(1) - - return &UploadOutput{ - Location: uploadLocation, - VersionID: complete.VersionId, - UploadID: u.uploadID, - }, nil -} - -func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) { - if err != nil && err != io.EOF { - return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err) - } - - if nextChunkLen == 0 { - // No need to upload empty part, if file was empty to start - // with empty single part would of been created and never - // started multipart upload. - return false, nil - } - - part++ - // This upload exceeded maximum number of supported parts, error now. - if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) { - var msg string - if part > int64(u.cfg.MaxUploadParts) { - msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", - u.cfg.MaxUploadParts) - } else { - msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", - MaxUploadParts) - } - return false, awserr.New("TotalPartsExceeded", msg, nil) - } - - return true, err -} - -// readChunk runs in worker goroutines to pull chunks off of the ch channel -// and send() them as UploadPart requests. -func (u *multiuploader) readChunk(ch chan chunk) { - defer u.wg.Done() - for { - data, ok := <-ch - - if !ok { - break - } - - if u.geterr() == nil { - if err := u.send(data); err != nil { - u.seterr(err) - } - } - } -} - -// send performs an UploadPart request and keeps track of the completed -// part information. -func (u *multiuploader) send(c chunk) error { - params := &s3.UploadPartInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - Body: c.buf, - UploadId: &u.uploadID, - SSECustomerAlgorithm: u.in.SSECustomerAlgorithm, - SSECustomerKey: u.in.SSECustomerKey, - PartNumber: &c.num, - } - - resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...) 
- c.cleanup() - if err != nil { - return err - } - - n := c.num - completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n} - - u.m.Lock() - u.parts = append(u.parts, completed) - u.m.Unlock() - - return nil -} - -// geterr is a thread-safe getter for the error object -func (u *multiuploader) geterr() error { - u.m.Lock() - defer u.m.Unlock() - - return u.err -} - -// seterr is a thread-safe setter for the error object -func (u *multiuploader) seterr(e error) { - u.m.Lock() - defer u.m.Unlock() - - u.err = e -} - -// fail will abort the multipart unless LeavePartsOnError is set to true. -func (u *multiuploader) fail() { - if u.cfg.LeavePartsOnError { - return - } - - params := &s3.AbortMultipartUploadInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - UploadId: &u.uploadID, - } - _, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) - if err != nil { - logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err)) - } -} - -// complete successfully completes a multipart upload and returns the response. -func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { - if u.geterr() != nil { - u.fail() - return nil - } - - // Parts must be sorted in PartNumber order. - sort.Sort(u.parts) - - params := &s3.CompleteMultipartUploadInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - UploadId: &u.uploadID, - MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, - } - resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) - if err != nil { - u.seterr(err) - u.fail() - } - - return resp -} - -type byteSlicePool interface { - Get() []byte - Put([]byte) - Size() int64 -} - -type partPool struct { - partSize int64 - sync.Pool -} - -func (p *partPool) Get() []byte { - return p.Pool.Get().([]byte) -} - -func (p *partPool) Put(b []byte) { - p.Pool.Put(b) -} - -func (p *partPool) Size() int64 { - return p.partSize -} - -func newPartPool(partSize int64) *partPool { - p := &partPool{partSize: partSize} - - p.New = func() interface{} { - return make([]byte, p.partSize) - } - - return p -} - -var newByteSlicePool = func(partSize int64) byteSlicePool { - return newPartPool(partSize) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go deleted file mode 100644 index 9a5b4638..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ /dev/null @@ -1,171 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3manager - -import ( - "io" - "time" -) - -// UploadInput provides the input parameters for uploading a stream or buffer -// to an object in an Amazon S3 bucket. This type is similar to the s3 -// package's PutObjectInput with the exception that the Body member is an -// io.Reader instead of an io.ReadSeeker. -type UploadInput struct { - _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - - // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // The readable body payload to send to S3. - Body io.Reader - - // Bucket name to which the PUT operation was initiated. - // - // When using this API with an access point, you must direct requests to the - // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Can be used to specify caching behavior along the request/reply chain. For - // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // The base64-encoded 128-bit MD5 digest of the message (without the headers) - // according to RFC 1864. This header can be used as a message integrity check - // to verify that the data is the same data that was originally sent. Although - // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, - // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). - ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - - // A standard MIME type describing the format of the contents. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. 
- GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Object key for which the PUT operation was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Specifies whether a legal hold will be applied to this object. For more information - // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode that you want to apply to this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when you want this object's Object Lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS Encryption Context to use for object encryption. The - // value of this header is a base64-encoded UTF-8 string holding JSON with the - // encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the AWS Key Management Service (AWS KMS) - // symmetrical customer managed customer master key (CMK) that was used for - // the object. 
- // - // If the value of x-amz-server-side-encryption is aws:kms, this header specifies - // the ID of the symmetric customer managed AWS KMS CMK that will be used for - // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not - // providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS - // managed CMK in AWS to protect the data. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // If you don't specify, Standard is the default storage class. Amazon S3 supports - // other storage classes. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - // (For example, "Key1=Value1") - Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). - // - // In the following example, the request header sets the redirect to an object - // (anotherPage.html) in the same bucket: - // - // x-amz-website-redirect-location: /anotherPage.html - // - // In the following example, the request header sets the object redirect to - // another website: - // - // x-amz-website-redirect-location: http://www.example.com/ - // - // For more information about website hosting in Amazon S3, see Hosting Websites - // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). 
- WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go deleted file mode 100644 index 765dc07c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go +++ /dev/null @@ -1,75 +0,0 @@ -package s3manager - -import ( - "bufio" - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom -type WriterReadFrom interface { - io.Writer - io.ReaderFrom -} - -// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer -type WriterReadFromProvider interface { - GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) -} - -type bufferedWriter interface { - WriterReadFrom - Flush() error - Reset(io.Writer) -} - -type bufferedReadFrom struct { - bufferedWriter -} - -func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { - n, err := b.bufferedWriter.ReadFrom(r) - if flushErr := b.Flush(); flushErr != nil && err == nil { - err = flushErr - } - return n, err -} - -// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool -// to manage allocation and reuse of *bufio.Writer structures. -type PooledBufferedReadFromProvider struct { - pool sync.Pool -} - -// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider -// Size is used to control the size of the underlying *bufio.Writer created for -// calls to GetReadFrom. -func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider { - if size < int(32*sdkio.KibiByte) { - size = int(64 * sdkio.KibiByte) - } - - return &PooledBufferedReadFromProvider{ - pool: sync.Pool{ - New: func() interface{} { - return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)} - }, - }, - } -} - -// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom -// interface/ Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom -// has been completed in order to allow the reuse of the *bufio.Writer -func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) { - buffer := p.pool.Get().(*bufferedReadFrom) - buffer.Reset(writer) - r = buffer - cleanup = func() { - buffer.Reset(nil) // Reset to nil writer to release reference - p.pool.Put(buffer) - } - return r, cleanup -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go deleted file mode 100644 index b4c07b4d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restxml" -) - -// S3 provides the API operation methods for making requests to -// Amazon Simple Storage Service. See this package's package overview docs -// for details on the service. -// -// S3 methods are safe to use concurrently. 
It is not safe to -// modify mutate any of the struct's properties though. -type S3 struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "s3" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "S3" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the S3 client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a S3 client from just a session. -// svc := s3.New(mySession) -// -// // Create a S3 client with additional configuration -// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { - svc := &S3{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2006-03-01", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { - s.DisableURIPathEscaping = true - })) - svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) - - svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) - svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a S3 operation and runs any -// custom request initialization. 
-func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go deleted file mode 100644 index b71c835d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ /dev/null @@ -1,84 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) - -func validateSSERequiresSSL(r *request.Request) { - if r.HTTPRequest.URL.Scheme == "https" { - return - } - - if iface, ok := r.Params.(sseCustomerKeyGetter); ok { - if len(iface.getSSECustomerKey()) > 0 { - r.Error = errSSERequiresSSL - return - } - } - - if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { - if len(iface.getCopySourceSSECustomerKey()) > 0 { - r.Error = errSSERequiresSSL - return - } - } -} - -const ( - sseKeyHeader = "x-amz-server-side-encryption-customer-key" - sseKeyMD5Header = sseKeyHeader + "-md5" -) - -func computeSSEKeyMD5(r *request.Request) { - var key string - if g, ok := r.Params.(sseCustomerKeyGetter); ok { - key = g.getSSECustomerKey() - } - - computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) -} - -const ( - copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" - copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" -) - -func computeCopySourceSSEKeyMD5(r *request.Request) { - var key string - if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { - key = g.getCopySourceSSECustomerKey() - } - - computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) -} - -func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { - if len(key) == 0 { - // Backwards compatiablity where user just set the header value instead - // of using the API parameter, or setting the header value for an - // operation without the parameters modeled. - key = r.Header.Get(keyHeader) - if len(key) == 0 { - return - } - - // In backwards compatiable, the header's value is not base64 encoded, - // and needs to be encoded and updated by the SDK's customizations. - b64Key := base64.StdEncoding.EncodeToString([]byte(key)) - r.Header.Set(keyHeader, b64Key) - } - - // Only update Key's MD5 if not already set. 
- if len(r.Header.Get(keyMD5Header)) == 0 { - sum := md5.Sum([]byte(key)) - keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) - r.Header.Set(keyMD5Header, keyMD5) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go deleted file mode 100644 index f6a69aed..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ /dev/null @@ -1,40 +0,0 @@ -package s3 - -import ( - "bytes" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -func copyMultipartStatusOKUnmarhsalError(r *request.Request) { - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to read response body", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - body := bytes.NewReader(b) - r.HTTPResponse.Body = ioutil.NopCloser(body) - defer body.Seek(0, sdkio.SeekStart) - - if body.Len() == 0 { - // If there is no body don't attempt to parse the body. - return - } - - unmarshalError(r) - if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == request.ErrCodeSerialization { - r.Error = nil - return - } - r.HTTPResponse.StatusCode = http.StatusServiceUnavailable - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go deleted file mode 100644 index 5b63fac7..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ /dev/null @@ -1,88 +0,0 @@ -package s3 - -import ( - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"Error"` - Code string `xml:"Code"` - Message string `xml:"Message"` -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) - - // Bucket exists in a different region, and request needs - // to be made to the correct region. - if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { - msg := fmt.Sprintf( - "incorrect region, the bucket is not in '%s' region at endpoint '%s'", - aws.StringValue(r.Config.Region), - aws.StringValue(r.Config.Endpoint), - ) - if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { - msg += fmt.Sprintf(", bucket is in '%s' region", v) - } - r.Error = awserr.NewRequestFailure( - awserr.New("BucketRegionError", msg, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Attempt to parse error from body if it is known - var errResp xmlErrorResponse - err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) - if err == io.EOF { - // Only capture the error if an unmarshal error occurs that is not EOF, - // because S3 might send an error without a error message which causes - // the XML unmarshal to fail with EOF. 
- err = nil - } - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Fallback to status code converted to message if still no error code - if len(errResp.Code) == 0 { - statusText := http.StatusText(r.HTTPResponse.StatusCode) - errResp.Code = strings.Replace(statusText, " ", "", -1) - errResp.Message = statusText - } - - r.Error = awserr.NewRequestFailure( - awserr.New(errResp.Code, errResp.Message, err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) -} - -// A RequestFailure provides access to the S3 Request ID and Host ID values -// returned from API operation errors. Getting the error as a string will -// return the formated error with the same information as awserr.RequestFailure, -// while also adding the HostID value from the response. -type RequestFailure interface { - awserr.RequestFailure - - // Host ID is the S3 Host ID needed for debug, and contacting support - HostID() string -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go deleted file mode 100644 index 2596c694..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ /dev/null @@ -1,214 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3 - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// WaitUntilBucketExists uses the Amazon S3 API operation -// HeadBucket to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { - return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilBucketExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 200, - }, - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 301, - }, - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 403, - }, - { - State: request.RetryWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadBucketInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadBucketRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) 
- - return w.WaitWithContext(ctx) -} - -// WaitUntilBucketNotExists uses the Amazon S3 API operation -// HeadBucket to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { - return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilBucketNotExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadBucketInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadBucketRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilObjectExists uses the Amazon S3 API operation -// HeadObject to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { - return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilObjectExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 200, - }, - { - State: request.RetryWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadObjectInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadObjectRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilObjectNotExists uses the Amazon S3 API operation -// HeadObject to wait for a condition to be met before returning. 
-// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { - return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilObjectNotExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadObjectInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadObjectRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go deleted file mode 100644 index 7f60d4aa..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ /dev/null @@ -1,3115 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opAssumeRole = "AssumeRole" - -// AssumeRoleRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRole operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRole for more information on using the AssumeRole -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { - op := &request.Operation{ - Name: opAssumeRole, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleInput{} - } - - output = &AssumeRoleOutput{} - req = c.newRequest(op, input, output) - return -} - -// AssumeRole API operation for AWS Security Token Service. 
-// -// Returns a set of temporary security credentials that you can use to access -// AWS resources that you might not normally have access to. These temporary -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use AssumeRole within your account or for cross-account -// access. For a comparison of AssumeRole with other API operations that produce -// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You cannot use AWS account root user credentials to call AssumeRole. You -// must use credentials for an IAM user or an IAM role to call AssumeRole. -// -// For cross-account access, imagine that you own multiple accounts and need -// to access resources in each account. You could create long-term credentials -// in each account to access those resources. However, managing all those credentials -// and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account. -// Then use temporary security credentials to access all the other accounts -// by assuming roles in those accounts. For more information about roles, see -// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) -// in the IAM User Guide. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRole last -// for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: You cannot call -// the AWS STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. 
You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// To assume a role from a different account, your AWS account must be trusted -// by the role. The trust relationship is defined in the role's trust policy -// when the role is created. That trust policy states which accounts are allowed -// to delegate that access to users in the account. -// -// A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. If the user is in the same account as the -// role, then you can do either of the following: -// -// * Attach a policy to the user (identical to the previous user in a different -// account). -// -// * Add the user as a principal directly in the role's trust policy. -// -// In this case, the trust policy acts as an IAM resource-based policy. Users -// in the same account as the role do not need explicit permission to assume -// the role. For more information about trust policies and resource-based policies, -// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These tags are -// called session tags. For more information about session tags, see Passing -// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Using MFA with AssumeRole -// -// (Optional) You can include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios to ensure -// that the user that assumes the role has been authenticated with an AWS MFA -// device. In that scenario, the trust policy of the role being assumed includes -// a condition that tests for MFA authentication. If the caller does not include -// valid MFA information, the request to assume the role is denied. The condition -// in a trust policy that tests for MFA authentication might look like the following -// example. -// -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} -// -// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. 
-// -// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode -// parameters. The SerialNumber value identifies the user's hardware or virtual -// MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA device produces. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRole for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - return out, req.Send() -} - -// AssumeRoleWithContext is the same as AssumeRole with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRole for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithSAML = "AssumeRoleWithSAML" - -// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithSAML operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
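A minimal sketch of the AssumeRole call described above, including the optional MFA SerialNumber and TokenCode parameters. It assumes an aws-sdk-go v1 session resolved from the environment; the role ARN, session name, and MFA values are placeholders, and the RoleArn/RoleSessionName field names come from the SDK's AssumeRoleInput type rather than the excerpt above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Credentials and region are resolved from the environment / shared config.
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/example-role"), // placeholder ARN
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(3600),                                          // within the role's maximum session duration
		SerialNumber:    aws.String("arn:aws:iam::111122223333:mfa/example-user"), // placeholder MFA device
		TokenCode:       aws.String("123456"),                                     // current TOTP from that device
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```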
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithSAML, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithSAMLInput{} - } - - output = &AssumeRoleWithSAMLOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithSAML API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based AWS access -// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of -// an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to AWS services. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. You can provide -// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour -// to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. 
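As a sketch of the AssumeRoleWithSAML call whose session-duration behaviour is described above: the call itself needs no AWS credentials, and the role ARN, SAML provider ARN, and base64-encoded assertion are placeholders; the field names are taken from the SDK's AssumeRoleWithSAMLInput type.

```go
package stsexample // hypothetical package for these sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithSAML exchanges a base64-encoded SAML assertion for temporary credentials.
// The role and identity-provider ARNs below are placeholders.
func assumeWithSAML(svc *sts.STS, assertion string) (*sts.Credentials, error) {
	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/saml-role"),
		PrincipalArn:    aws.String("arn:aws:iam::111122223333:saml-provider/example"),
		SAMLAssertion:   aws.String(assertion),
		DurationSeconds: aws.Int64(3600), // bounded by the role's maximum session duration
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
```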
-// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any AWS service with the following exception: you cannot -// call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. -// The identity of the caller is validated by using keys in the metadata document -// that is uploaded for the SAML provider entity for your identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail -// logs. The entry includes the value in the NameID element of the SAML assertion. -// We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the persistent -// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). -// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your SAML assertion -// as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plain text session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, session tags override the role's tags with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. 
For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// SAML Configuration -// -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. -// -// For more information, see the following resources: -// -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithSAML for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. 
-// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - return out, req.Send() -} - -// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithSAML for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" - -// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
-// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithWebIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithWebIdentityInput{} - } - - output = &AssumeRoleWithWebIdentityOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. -// -// For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the AWS SDK for iOS Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of AWS security -// credentials. Therefore, you can distribute an application (for example, on -// mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application. You also don't need to deploy -// server-based proxy services that use long-term AWS credentials. Instead, -// the identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to AWS service API operations. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithWebIdentity -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. 
To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithWebIdentity can -// be used to make API calls to any AWS service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your web identity -// token as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plain text session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, the session tag overrides the role tag with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. 
For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Identities -// -// Before your application can call AssumeRoleWithWebIdentity, you must have -// an identity token from a supported identity provider and create a role that -// the application can assume. The role that your application assumes must trust -// the identity provider that is associated with the identity token. In other -// words, the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail -// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided Web Identity Token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you -// could instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). -// -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity -// API, see the following resources: -// -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). -// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to AWS. -// -// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and -// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithWebIdentity for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. 
-// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the identity provider (IDP) that -// was asked to verify the incoming identity token could not be reached. This -// is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - return out, req.Send() -} - -// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithWebIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
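A sketch of the web identity flow described above: the caller presents only an OIDC token from the identity provider and no long-term AWS credentials. The role ARN and session name are placeholders; the field names follow the SDK's AssumeRoleWithWebIdentityInput type.

```go
package stsexample // hypothetical package for these sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithWebIdentity trades an OIDC token from a web identity provider for
// temporary credentials; the request is signed anonymously, as noted above.
func assumeWithWebIdentity(svc *sts.STS, oidcToken string) (*sts.Credentials, error) {
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::111122223333:role/web-identity-role"), // placeholder
		RoleSessionName:  aws.String("app-user-session"),
		WebIdentityToken: aws.String(oidcToken),
		DurationSeconds:  aws.Int64(3600),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
```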
-func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" - -// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the -// client's request for the DecodeAuthorizationMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { - op := &request.Operation{ - Name: opDecodeAuthorizationMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecodeAuthorizationMessageInput{} - } - - output = &DecodeAuthorizationMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// DecodeAuthorizationMessage API operation for AWS Security Token Service. -// -// Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an AWS request. -// -// For example, if a user is not authorized to perform an operation that he -// or she has requested, the request returns a Client.UnauthorizedOperation -// response (an HTTP 403 response). Some AWS operations additionally return -// an encoded message that can provide details about this authorization failure. -// -// Only certain AWS operations return an encoded authorization message. The -// documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. -// -// The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the operation -// should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. -// -// The decoded message includes the following type of information: -// -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested action. 
-// -// * The requested resource. -// -// * The values of condition keys in the context of the user's request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation DecodeAuthorizationMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - return out, req.Send() -} - -// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of -// the ability to pass a context and additional request options. -// -// See DecodeAuthorizationMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetAccessKeyInfo = "GetAccessKeyInfo" - -// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the -// client's request for the GetAccessKeyInfo operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { - op := &request.Operation{ - Name: opGetAccessKeyInfo, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetAccessKeyInfoInput{} - } - - output = &GetAccessKeyInfoOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetAccessKeyInfo API operation for AWS Security Token Service. -// -// Returns the account identifier for the specified access key ID. 
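A sketch of the DecodeAuthorizationMessage call described above. It assumes the caller has been granted sts:DecodeAuthorizationMessage, and that the encoded message was taken from a Client.UnauthorizedOperation response.

```go
package stsexample // hypothetical package for these sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// decodeDenied expands the encoded message attached to a Client.UnauthorizedOperation
// response into the human-readable authorization details described above.
func decodeDenied(svc *sts.STS, encoded string) error {
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.DecodedMessage))
	return nil
}
```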
-// -// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) -// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). -// For more information about access keys, see Managing Access Keys for IAM -// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. -// -// When you pass an access key ID to this operation, it returns the ID of the -// AWS account to which the keys belong. Access key IDs beginning with AKIA -// are long-term credentials for an IAM user or the AWS account root user. Access -// key IDs beginning with ASIA are temporary credentials that are created using -// STS operations. If the account in the response belongs to you, you can sign -// in as the root user and review your root user access keys. Then, you can -// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail -// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. -// -// This operation does not indicate the state of the access key. The key might -// be active, inactive, or deleted. Active keys might not have permissions to -// perform an operation. Providing a deleted access key might return an error -// that the key doesn't exist. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetAccessKeyInfo for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - return out, req.Send() -} - -// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of -// the ability to pass a context and additional request options. -// -// See GetAccessKeyInfo for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCallerIdentity = "GetCallerIdentity" - -// GetCallerIdentityRequest generates a "aws/request.Request" representing the -// client's request for the GetCallerIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCallerIdentity for more information on using the GetCallerIdentity -// API call, and error handling. 
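A sketch of the GetAccessKeyInfo lookup just described, returning the account that owns a given access key ID. The key ID shown is the documentation example value from the comments above, not a real credential.

```go
package stsexample // hypothetical package for these sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// lookupKeyOwner prints the AWS account ID that owns the given access key ID.
func lookupKeyOwner(svc *sts.STS) error {
	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // example key ID from the docs above
	})
	if err != nil {
		return err
	}
	fmt.Println("owned by account:", aws.StringValue(out.Account))
	return nil
}
```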
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCallerIdentityRequest method. -// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { - op := &request.Operation{ - Name: opGetCallerIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCallerIdentityInput{} - } - - output = &GetCallerIdentityOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCallerIdentity API operation for AWS Security Token Service. -// -// Returns details about the IAM user or role whose credentials are used to -// call the operation. -// -// No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetCallerIdentity for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - return out, req.Send() -} - -// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See GetCallerIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetFederationToken = "GetFederationToken" - -// GetFederationTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetFederationToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
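GetCallerIdentity takes no parameters and, as noted above, requires no extra permissions, so a sketch is short; only the session setup is an assumption.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Ask STS who the currently configured credentials belong to.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("account=%s arn=%s userid=%s\n",
		aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
}
```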
-// -// See GetFederationToken for more information on using the GetFederationToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { - op := &request.Operation{ - Name: opGetFederationToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetFederationTokenInput{} - } - - output = &GetFederationTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetFederationToken API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You can create a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider. In this case, we recommend -// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. -// For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. -// -// You can also call GetFederationToken using the security credentials of an -// AWS account root user, but we do not recommend it. Instead, we recommend -// that you create an IAM user for the purpose of the proxy application. Then -// attach a policy to the IAM user that limits federated users to only the actions -// and resources that they need to access. For more information, see IAM Best -// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using AWS account root user credentials have a maximum duration -// of 3,600 seconds (1 hour). 
-// -// Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// AWS service except the following: -// -// * You cannot call any IAM operations using the AWS CLI or the AWS API. -// -// * You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// Tag key–value pairs are not case sensitive, but case is preserved. This -// means that you cannot have separate Department and department tag keys. Assume -// that the user that you are federating has the Department=Marketing tag and -// you pass the department=engineering session tag. Department and department -// are not saved as separate tags, and the session tag passed in the request -// takes precedence over the user tag. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetFederationToken for usage and error information. 
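A sketch of the GetFederationToken pattern described above, called with an IAM user's long-term credentials and an inline session policy. The federated user name and the policy document are placeholders.

```go
package stsexample // hypothetical package for these sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// federate returns temporary credentials for a federated user whose permissions are
// the intersection of the calling IAM user's policies and the inline session policy.
func federate(svc *sts.STS) (*sts.Credentials, error) {
	// Placeholder session policy: read-only access to a single bucket.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
		`"Action":["s3:GetObject"],"Resource":"arn:aws:s3:::example-bucket/*"}]}`

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(43200), // the 12-hour default; up to 36 hours for IAM users
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
```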
-// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - return out, req.Send() -} - -// GetFederationTokenWithContext is the same as GetFederationToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetFederationToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSessionToken = "GetSessionToken" - -// GetSessionTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetSessionToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSessionToken for more information on using the GetSessionToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSessionTokenRequest method. 
-// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { - op := &request.Operation{ - Name: opGetSessionToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionTokenInput{} - } - - output = &GetSessionTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSessionToken API operation for AWS Security Token Service. -// -// Returns a set of temporary credentials for an AWS account or IAM user. The -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. -// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA -// code that is associated with their MFA device. Using the temporary security -// credentials that are returned from the call, IAM users can then make programmatic -// calls to API operations that require MFA authentication. If you do not supply -// a correct MFA code, then the API returns an access denied error. For a comparison -// of GetSessionToken with the other API operations that produce temporary credentials, -// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// Session Duration -// -// The GetSessionToken operation must be called by using the long-term AWS security -// credentials of the AWS account root user or an IAM user. Credentials that -// are created by IAM users are valid for the duration that you specify. This -// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials -// based on account credentials can range from 900 seconds (15 minutes) up to -// 3,600 seconds (1 hour), with a default of 1 hour. -// -// Permissions -// -// The temporary security credentials created by GetSessionToken can be used -// to make API calls to any AWS service with the following exceptions: -// -// * You cannot call any IAM API operations unless MFA authentication information -// is included in the request. -// -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with AWS account root user -// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with AWS. -// -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using AWS account root user credentials, the -// temporary credentials have root user permissions. 
Similarly, if GetSessionToken -// is called using the credentials of an IAM user, the temporary credentials -// have the same permissions as the IAM user. -// -// For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetSessionToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - return out, req.Send() -} - -// GetSessionTokenWithContext is the same as GetSessionToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetSessionToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -type AssumeRoleInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. 
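As the GetSessionToken documentation above describes, an MFA-enabled IAM user passes the device serial number and a current token code to obtain temporary credentials. A minimal sketch of that call with the v1 SDK follows; it assumes long-term IAM user credentials and a region are already configured in the environment, and the MFA serial number and token code are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Uses the default credential chain (environment, shared config, etc.).
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// Request a 1-hour session protected by MFA. SerialNumber and TokenCode
	// are placeholders for the caller's MFA device and its current code.
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
		TokenCode:       aws.String("123456"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned Credentials carry the temporary access key ID, secret
	// access key, session token, and expiry time.
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId),
		aws.TimeValue(out.Credentials.Expiration))
}
```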
For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. A cross-account - // role is usually set up to trust everyone in an account. Therefore, the administrator - // of the trusting account might send an external ID to the administrator of - // the trusted account. That way, only someone with the ID can assume the role, - // rather than everyone in the account. For more information about the external - // ID, see How to Use an External ID When Granting Access to Your AWS Resources - // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - ExternalId *string `min:"2" type:"string"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. 
For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the role to assume. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role - // is assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the - // account that owns the role. The role session name is also used in the ARN - // of the assumed role principal. This means that subsequent cross-account API - // requests that use the temporary security credentials will expose the role - // session name to the external account in their AWS CloudTrail logs. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SerialNumber *string `min:"9" type:"string"` - - // A list of session tags that you want to pass. Each session tag consists of - // a key name and an associated value. For more information about session tags, - // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. 
The plain - // text session tag keys can’t exceed 128 characters, and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the role. When you do, session tags override a role tag with the same - // key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - // - // Additionally, if you used temporary credentials to perform this operation, - // the new session inherits any transitive session tags from the calling session. - // If you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) - // in the IAM User Guide. - Tags []*Tag `type:"list"` - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` - - // A list of keys for session tags that you want to set as transitive. If you - // set a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. - // - // This parameter is optional. When you set session tags as transitive, the - // session policy and session tags packed binary limit is not affected. - // - // If you choose not to specify a transitive tag key, then no tags are passed - // from this session to any subsequent sessions. - TransitiveTagKeys []*string `type:"list"` -} - -// String returns the string representation -func (s AssumeRoleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AssumeRoleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.ExternalId != nil && len(*s.ExternalId) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { - s.DurationSeconds = &v - return s -} - -// SetExternalId sets the ExternalId field's value. -func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { - s.ExternalId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { - s.PolicyArns = v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { - s.RoleSessionName = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { - s.SerialNumber = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { - s.Tags = v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { - s.TokenCode = &v - return s -} - -// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. -func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { - s.TransitiveTagKeys = v - return s -} - -// Contains the response to a successful AssumeRole request, including temporary -// AWS credentials that can be used to make AWS requests. 
-type AssumeRoleOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s AssumeRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { - s.AssumedRoleUser = v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { - s.PackedPolicySize = &v - return s -} - -type AssumeRoleWithSAMLInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. Your role session lasts for - // the duration that you specify for the DurationSeconds parameter, or until - // the time specified in the SAML authentication response's SessionNotOnOrAfter - // value, whichever is shorter. You can provide a DurationSeconds value from - // 900 seconds (15 minutes) up to the maximum session duration setting for the - // role. This setting can have a value from 1 hour to 12 hours. If you specify - // a value higher than this setting, the operation fails. For example, if you - // specify a session duration of 12 hours, but your administrator set the maximum - // session duration to 6 hours, your operation fails. To learn how to view the - // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. 
For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. 
- PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes - // the IdP. - // - // PrincipalArn is a required field - PrincipalArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The base-64 encoded SAML authentication response provided by the IdP. - // - // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. - // - // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithSAMLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PrincipalArn == nil { - invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) - } - if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.SAMLAssertion == nil { - invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) - } - if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { - invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { - s.PolicyArns = v - return s -} - -// SetPrincipalArn sets the PrincipalArn field's value. -func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { - s.PrincipalArn = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { - s.RoleArn = &v - return s -} - -// SetSAMLAssertion sets the SAMLAssertion field's value. 
-func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { - s.SAMLAssertion = &v - return s -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary AWS credentials that can be used to make AWS requests. -type AssumeRoleWithSAMLOutput struct { - _ struct{} `type:"structure"` - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The value of the Recipient attribute of the SubjectConfirmationData element - // of the SAML assertion. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // The value of the Issuer element of the SAML assertion. - Issuer *string `type:"string"` - - // A hash value based on the concatenation of the Issuer response value, the - // AWS account ID, and the friendly name (the last part of the ARN) of the SAML - // provider in IAM. The combination of NameQualifier and Subject can be used - // to uniquely identify a federated user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" - // ) ) - NameQualifier *string `type:"string"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string `type:"string"` - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient - // or persistent. - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, - // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient - // is returned as transient. If the format includes any other prefix, the format - // is returned with no modifications. - SubjectType *string `type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithSAMLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithSAMLOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { - s.Credentials = v - return s -} - -// SetIssuer sets the Issuer field's value. 
-func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { - s.Issuer = &v - return s -} - -// SetNameQualifier sets the NameQualifier field's value. -func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { - s.NameQualifier = &v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { - s.PackedPolicySize = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { - s.Subject = &v - return s -} - -// SetSubjectType sets the SubjectType field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { - s.SubjectType = &v - return s -} - -type AssumeRoleWithWebIdentityInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. 
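AssumeRoleWithSAML is called without long-term AWS credentials; the base64-encoded SAML response from the IdP authenticates the request. A rough sketch under stated assumptions: the provider and role ARNs are placeholders, a region is configured, and samlResponse holds an assertion obtained from the IdP out of band:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithSAML exchanges a base64-encoded SAML assertion for temporary
// credentials.
func assumeWithSAML(samlResponse string) (*sts.AssumeRoleWithSAMLOutput, error) {
	svc := sts.New(session.Must(session.NewSession()))
	return svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MyIdP"), // placeholder
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),      // placeholder
		SAMLAssertion: aws.String(samlResponse),
	})
}

func main() {
	out, err := assumeWithSAML("<base64 SAML response>")
	if err != nil {
		log.Fatal(err)
	}
	// Subject and NameQualifier identify the federated user; Credentials hold the keys.
	fmt.Println(aws.StringValue(out.Subject), aws.StringValue(out.Credentials.AccessKeyId))
}
```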
- // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The fully qualified host component of the domain name of the identity provider. - // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string `min:"4" type:"string"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. Typically, you pass the name - // or identifier that is associated with the user who is using your application. - // That way, the temporary security credentials that your application will use - // are associated with that user. This session name is included as part of the - // ARN and assumed role ID in the AssumedRoleUser response element. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. 
You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by - // the identity provider. Your application must get this token by authenticating - // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. - // - // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithWebIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ProviderId != nil && len(*s.ProviderId) < 4 { - invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.WebIdentityToken == nil { - invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) - } - if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { - invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { - s.PolicyArns = v - return s -} - -// SetProviderId sets the ProviderId field's value. -func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { - s.ProviderId = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. 
-func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { - s.RoleSessionName = &v - return s -} - -// SetWebIdentityToken sets the WebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { - s.WebIdentityToken = &v - return s -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary AWS credentials that can be used to make AWS requests. -type AssumeRoleWithWebIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The intended audience (also known as client ID) of the web identity token. - // This is traditionally the client identifier issued to the application that - // requested the web identity token. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed - // in the AssumeRoleWithWebIdentity request. - Provider *string `type:"string"` - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with - // the AssumeRoleWithWebIdentity call. The identifier is typically unique to - // the user and the application that acquired the WebIdentityToken (pairwise - // identifier). For OpenID Connect ID tokens, this field contains the value - // returned by the identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s AssumeRoleWithWebIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumeRoleWithWebIdentityOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. 
-func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { - s.PackedPolicySize = &v - return s -} - -// SetProvider sets the Provider field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { - s.Provider = &v - return s -} - -// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { - s.SubjectFromWebIdentityToken = &v - return s -} - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - _ struct{} `type:"structure"` - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by AWS when the - // role is created. - // - // AssumedRoleId is a required field - AssumedRoleId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s AssumedRoleUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AssumedRoleUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { - s.Arn = &v - return s -} - -// SetAssumedRoleId sets the AssumedRoleId field's value. -func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { - s.AssumedRoleId = &v - return s -} - -// AWS credentials for API authentication. -type Credentials struct { - _ struct{} `type:"structure"` - - // The access key ID that identifies the temporary security credentials. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` - - // The date on which the current credentials expire. - // - // Expiration is a required field - Expiration *time.Time `type:"timestamp" required:"true"` - - // The secret access key that can be used to sign requests. - // - // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` - - // The token that users must pass to the service API to use the temporary credentials. - // - // SessionToken is a required field - SessionToken *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Credentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Credentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *Credentials) SetAccessKeyId(v string) *Credentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. 
-func (s *Credentials) SetExpiration(v time.Time) *Credentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *Credentials) SetSecretAccessKey(v string) *Credentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *Credentials) SetSessionToken(v string) *Credentials { - s.SessionToken = &v - return s -} - -type DecodeAuthorizationMessageInput struct { - _ struct{} `type:"structure"` - - // The encoded message that was returned with the response. - // - // EncodedMessage is a required field - EncodedMessage *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecodeAuthorizationMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} - if s.EncodedMessage == nil { - invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) - } - if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncodedMessage sets the EncodedMessage field's value. -func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { - s.EncodedMessage = &v - return s -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an AWS -// request. -type DecodeAuthorizationMessageOutput struct { - _ struct{} `type:"structure"` - - // An XML document that contains the decoded message. - DecodedMessage *string `type:"string"` -} - -// String returns the string representation -func (s DecodeAuthorizationMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DecodeAuthorizationMessageOutput) GoString() string { - return s.String() -} - -// SetDecodedMessage sets the DecodedMessage field's value. -func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { - s.DecodedMessage = &v - return s -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - _ struct{} `type:"structure"` - - // The ARN that specifies the federated user that is associated with the credentials. - // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. 
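DecodeAuthorizationMessageInput and DecodeAuthorizationMessageOutput above wrap the call that turns the opaque message from an access-denied response into readable XML. A brief sketch, assuming the encoded message was captured from a failed request and the caller is permitted sts:DecodeAuthorizationMessage:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// encoded is the value returned alongside an authorization failure.
	encoded := "<encoded authorization message>"

	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		log.Fatal(err)
	}
	// DecodedMessage is an XML document describing why the request was denied.
	fmt.Println(aws.StringValue(out.DecodedMessage))
}
```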
- // - // FederatedUserId is a required field - FederatedUserId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation -func (s FederatedUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FederatedUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *FederatedUser) SetArn(v string) *FederatedUser { - s.Arn = &v - return s -} - -// SetFederatedUserId sets the FederatedUserId field's value. -func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { - s.FederatedUserId = &v - return s -} - -type GetAccessKeyInfoInput struct { - _ struct{} `type:"structure"` - - // The identifier of an access key. - // - // This parameter allows (through its regex pattern) a string of characters - // that can consist of any upper- or lowercase letter or digit. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetAccessKeyInfoInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccessKeyInfoInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAccessKeyInfoInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} - if s.AccessKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) - } - if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { - invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { - s.AccessKeyId = &v - return s -} - -type GetAccessKeyInfoOutput struct { - _ struct{} `type:"structure"` - - // The number used to identify the AWS account. - Account *string `type:"string"` -} - -// String returns the string representation -func (s GetAccessKeyInfoOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAccessKeyInfoOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { - s.Account = &v - return s -} - -type GetCallerIdentityInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetCallerIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityInput) GoString() string { - return s.String() -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. -type GetCallerIdentityOutput struct { - _ struct{} `type:"structure"` - - // The AWS account ID number of the account that owns or contains the calling - // entity. - Account *string `type:"string"` - - // The AWS ARN associated with the calling entity. - Arn *string `min:"20" type:"string"` - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. 
The values returned are those listed - // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string `type:"string"` -} - -// String returns the string representation -func (s GetCallerIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCallerIdentityOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { - s.Account = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { - s.Arn = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { - s.UserId = &v - return s -} - -type GetFederationTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using AWS account root user credentials are restricted to a maximum of 3,600 - // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using root user credentials defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon - // S3 bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // Name is a required field - Name *string `min:"2" type:"string" required:"true"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. 
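GetCallerIdentityOutput and GetAccessKeyInfoOutput above are the smallest responses in this API: one reports who is making the call, the other which account owns a given access key. A quick sketch combining both; the access key ID passed to GetAccessKeyInfo is the standard documentation placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Who am I? GetCallerIdentity takes no parameters and needs no extra permissions.
	who, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(who.Account), aws.StringValue(who.Arn), aws.StringValue(who.UserId))

	// Which account owns this access key? (Placeholder key ID.)
	info, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(info.Account))
}
```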
If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as a managed session policy. The policies must exist in the same account - // as the IAM user that is requesting federated access. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plain text that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. 
- PolicyArns []*PolicyDescriptorType `type:"list"` - - // A list of session tags. Each session tag consists of a key name and an associated - // value. For more information about session tags, see Passing Session Tags - // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the user you are federating. When you do, session tags override a user - // tag with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - Tags []*Tag `type:"list"` -} - -// String returns the string representation -func (s GetFederationTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFederationTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { - s.DurationSeconds = &v - return s -} - -// SetName sets the Name field's value. 
-func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { - s.Name = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { - s.PolicyArns = v - return s -} - -// SetTags sets the Tags field's value. -func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { - s.Tags = v - return s -} - -// Contains the response to a successful GetFederationToken request, including -// temporary AWS credentials that can be used to make AWS requests. -type GetFederationTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // Identifiers for the federated user associated with the credentials (such - // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You - // can use the federated user's ARN in your resource-based policies, such as - // an Amazon S3 bucket policy. - FederatedUser *FederatedUser `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation -func (s GetFederationTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFederationTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { - s.Credentials = v - return s -} - -// SetFederatedUser sets the FederatedUser field's value. -func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { - s.FederatedUser = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { - s.PackedPolicySize = &v - return s -} - -type GetSessionTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3,600 seconds (one - // hour). If the duration is longer than one hour, the session for AWS account - // owners defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM - // user has a policy that requires MFA authentication. 
The value is either the - // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource - // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the AWS Management Console - // and viewing the user's security credentials. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, the user must provide a code when requesting a set of temporary - // security credentials. A user who fails to provide the code receives an "access - // denied" response when requesting resources that require MFA authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation -func (s GetSessionTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { - s.DurationSeconds = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { - s.SerialNumber = &v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { - s.TokenCode = &v - return s -} - -// Contains the response to a successful GetSessionToken request, including -// temporary AWS credentials that can be used to make AWS requests. -type GetSessionTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` -} - -// String returns the string representation -func (s GetSessionTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSessionTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. 
-func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { - s.Credentials = v - return s -} - -// A reference to the IAM managed policy that is passed as a session policy -// for a role session or a federated user session. -type PolicyDescriptorType struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. - Arn *string `locationName:"arn" min:"20" type:"string"` -} - -// String returns the string representation -func (s PolicyDescriptorType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PolicyDescriptorType) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PolicyDescriptorType) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} - if s.Arn != nil && len(*s.Arn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArn sets the Arn field's value. -func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { - s.Arn = &v - return s -} - -// You can pass custom key-value pair attributes when you assume a role or federate -// a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging AWS STS -// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag keys can’t - // exceed 128 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The value for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. 
-func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go deleted file mode 100644 index d5307fca..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ /dev/null @@ -1,11 +0,0 @@ -package sts - -import "github.com/aws/aws-sdk-go/aws/request" - -func init() { - initRequest = customizeRequest -} - -func customizeRequest(r *request.Request) { - r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go deleted file mode 100644 index fcb720dc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sts provides the client and types for making API -// requests to AWS Security Token Service. -// -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// By default, AWS Security Token Service (STS) is available as a global service, -// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. -// Global requests map to the US East (N. Virginia) region. AWS recommends using -// Regional AWS STS endpoints instead of the global endpoint to reduce latency, -// build in redundancy, and increase session token validity. For more information, -// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Most AWS Regions are enabled for operations in all AWS services by default. -// Those Regions are automatically activated for use with AWS STS. Some Regions, -// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more -// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) -// in the AWS General Reference. 
When you enable these AWS Regions, they are -// automatically activated for use with AWS STS. You cannot activate the STS -// endpoint for a Region that is disabled. Tokens that are valid in all AWS -// Regions are longer than tokens that are valid in Regions that are enabled -// by default. Changing this setting might affect existing systems where you -// temporarily store tokens. For more information, see Managing Global Endpoint -// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) -// in the IAM User Guide. -// -// After you activate a Region for use with AWS STS, you can direct AWS STS -// API calls to that Region. AWS STS recommends that you provide both the Region -// and endpoint when you make calls to a Regional endpoint. You can provide -// the Region alone for manually enabled Regions, such as Asia Pacific (Hong -// Kong). In this case, the calls are directed to the STS Regional endpoint. -// However, if you provide the Region alone for Regions enabled by default, -// the calls are directed to the global endpoint of https://sts.amazonaws.com. -// -// To view the list of AWS STS endpoints and whether they are active by default, -// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) -// in the IAM User Guide. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. -// -// If you activate AWS STS endpoints in Regions other than the default global -// endpoint, then you must also turn on CloudTrail logging in those Regions. -// This is necessary to record any AWS STS API calls that are made in those -// Regions. For more information, see Turning On CloudTrail in Additional Regions -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) -// in the AWS CloudTrail User Guide. -// -// AWS Security Token Service (STS) is a global service with a single endpoint -// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls -// to a global service. However, because this endpoint is physically located -// in the US East (N. Virginia) Region, your logs list us-east-1 as the event -// Region. CloudTrail does not write these logs to the US East (Ohio) Region -// unless you choose to include global service logs in that Region. CloudTrail -// writes calls to all Regional endpoints to their respective Regions. For example, -// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) -// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU -// (Frankfurt) Region. -// -// To learn more about CloudTrail, including how to turn it on and find your -// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. -// -// See sts package documentation for more information. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ -// -// Using the Client -// -// To contact AWS Security Token Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Security Token Service client STS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go deleted file mode 100644 index a233f542..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ /dev/null @@ -1,82 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -const ( - - // ErrCodeExpiredTokenException for service response error code - // "ExpiredTokenException". - // - // The web identity token that was passed is expired or is not valid. Get a - // new identity token from the identity provider and then retry the request. - ErrCodeExpiredTokenException = "ExpiredTokenException" - - // ErrCodeIDPCommunicationErrorException for service response error code - // "IDPCommunicationError". - // - // The request could not be fulfilled because the identity provider (IDP) that - // was asked to verify the incoming identity token could not be reached. This - // is often a transient error caused by network conditions. Retry the request - // a limited number of times so that you don't exceed the request rate. If the - // error persists, the identity provider might be down or not responding. - ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" - - // ErrCodeIDPRejectedClaimException for service response error code - // "IDPRejectedClaim". - // - // The identity provider (IdP) reported that authentication failed. This might - // be because the claim is invalid. - // - // If this error is returned for the AssumeRoleWithWebIdentity operation, it - // can also mean that the claim has expired or has been explicitly revoked. - ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" - - // ErrCodeInvalidAuthorizationMessageException for service response error code - // "InvalidAuthorizationMessageException". - // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. - ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" - - // ErrCodeInvalidIdentityTokenException for service response error code - // "InvalidIdentityToken". - // - // The web identity token that was passed could not be validated by AWS. Get - // a new identity token from the identity provider and then retry the request. - ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocument". - // - // The request was rejected because the policy document was malformed. The error - // message describes the specific error. 
- ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" - - // ErrCodePackedPolicyTooLargeException for service response error code - // "PackedPolicyTooLarge". - // - // The request was rejected because the total packed size of the session policies - // and session tags combined was too large. An AWS conversion compresses the - // session policy document, session policy ARNs, and session tags into a packed - // binary format that has a separate limit. The error message indicates by percentage - // how close the policies and tags are to the upper size limit. For more information, - // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // You could receive this error even though you meet other defined session policy - // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" - - // ErrCodeRegionDisabledException for service response error code - // "RegionDisabledException". - // - // STS is not activated in the requested region for the account that is being - // asked to generate credentials. The account administrator must use the IAM - // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodeRegionDisabledException = "RegionDisabledException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go deleted file mode 100644 index d34a6855..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// STS provides the API operation methods for making requests to -// AWS Security Token Service. See this package's package overview docs -// for details on the service. -// -// STS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type STS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sts" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the STS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a STS client from just a session. 
-// svc := sts.New(mySession) -// -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { - svc := &STS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2011-06-15", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a STS operation and runs any -// custom request initialization. -func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go deleted file mode 100644 index e2e1d6ef..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package stsiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sts" -) - -// STSAPI provides an interface to enable mocking the -// sts.STS service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } -// -// func main() { -// sess := session.New() -// svc := sts.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. 
-// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type STSAPI interface { - AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) - AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) - - AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) - - AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) - - DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) - - GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) - - GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) - - GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) - GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) - GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) - - GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) - GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) - GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) -} - -var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/clbanning/mxj/LICENSE b/vendor/github.com/clbanning/mxj/LICENSE deleted file mode 100644 index f27bccdf..00000000 --- 
a/vendor/github.com/clbanning/mxj/LICENSE +++ /dev/null @@ -1,55 +0,0 @@ -Copyright (c) 2012-2016 Charles Banning . All rights reserved. - -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -=============================================================================== - -Go Language Copyright & License - - -Copyright 2009 The Go Authors. All rights reserved. -Use of this source code is governed by a BSD-style -license that can be found in the LICENSE file. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/clbanning/mxj/anyxml.go b/vendor/github.com/clbanning/mxj/anyxml.go deleted file mode 100644 index ec2f3dfd..00000000 --- a/vendor/github.com/clbanning/mxj/anyxml.go +++ /dev/null @@ -1,189 +0,0 @@ -package mxj - -import ( - "encoding/xml" - "reflect" -) - -const ( - DefaultElementTag = "element" -) - -// Encode arbitrary value as XML. -// -// Note: unmarshaling the resultant -// XML may not return the original value, since tag labels may have been injected -// to create the XML representation of the value. -/* - Encode an arbitrary JSON object. 
- package main - - import ( - "encoding/json" - "fmt" - "github.com/clbanning/mxj" - ) - - func main() { - jsondata := []byte(`[ - { "somekey":"somevalue" }, - "string", - 3.14159265, - true - ]`) - var i interface{} - err := json.Unmarshal(jsondata, &i) - if err != nil { - // do something - } - x, err := mxj.AnyXmlIndent(i, "", " ", "mydoc") - if err != nil { - // do something else - } - fmt.Println(string(x)) - } - - output: - - somevalue - string - 3.14159265 - true - -*/ -// Alternative values for DefaultRootTag and DefaultElementTag can be set as: -// AnyXml( v, myRootTag, myElementTag). -func AnyXml(v interface{}, tags ...string) ([]byte, error) { - var rt, et string - if len(tags) == 1 || len(tags) == 2 { - rt = tags[0] - } else { - rt = DefaultRootTag - } - if len(tags) == 2 { - et = tags[1] - } else { - et = DefaultElementTag - } - - if v == nil { - if useGoXmlEmptyElemSyntax { - return []byte("<" + rt + ">"), nil - } - return []byte("<" + rt + "/>"), nil - } - if reflect.TypeOf(v).Kind() == reflect.Struct { - return xml.Marshal(v) - } - - var err error - s := new(string) - p := new(pretty) - - var ss string - var b []byte - switch v.(type) { - case []interface{}: - ss = "<" + rt + ">" - for _, vv := range v.([]interface{}) { - switch vv.(type) { - case map[string]interface{}: - m := vv.(map[string]interface{}) - if len(m) == 1 { - for tag, val := range m { - err = mapToXmlIndent(false, s, tag, val, p) - } - } else { - err = mapToXmlIndent(false, s, et, vv, p) - } - default: - err = mapToXmlIndent(false, s, et, vv, p) - } - if err != nil { - break - } - } - ss += *s + "" - b = []byte(ss) - case map[string]interface{}: - m := Map(v.(map[string]interface{})) - b, err = m.Xml(rt) - default: - err = mapToXmlIndent(false, s, rt, v, p) - b = []byte(*s) - } - - return b, err -} - -// Encode an arbitrary value as a pretty XML string. -// Alternative values for DefaultRootTag and DefaultElementTag can be set as: -// AnyXmlIndent( v, "", " ", myRootTag, myElementTag). 
-func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) { - var rt, et string - if len(tags) == 1 || len(tags) == 2 { - rt = tags[0] - } else { - rt = DefaultRootTag - } - if len(tags) == 2 { - et = tags[1] - } else { - et = DefaultElementTag - } - - if v == nil { - if useGoXmlEmptyElemSyntax { - return []byte(prefix + "<" + rt + ">"), nil - } - return []byte(prefix + "<" + rt + "/>"), nil - } - if reflect.TypeOf(v).Kind() == reflect.Struct { - return xml.MarshalIndent(v, prefix, indent) - } - - var err error - s := new(string) - p := new(pretty) - p.indent = indent - p.padding = prefix - - var ss string - var b []byte - switch v.(type) { - case []interface{}: - ss = "<" + rt + ">\n" - p.Indent() - for _, vv := range v.([]interface{}) { - switch vv.(type) { - case map[string]interface{}: - m := vv.(map[string]interface{}) - if len(m) == 1 { - for tag, val := range m { - err = mapToXmlIndent(true, s, tag, val, p) - } - } else { - p.start = 1 // we 1 tag in - err = mapToXmlIndent(true, s, et, vv, p) - *s += "\n" - } - default: - p.start = 0 // in case trailing p.start = 1 - err = mapToXmlIndent(true, s, et, vv, p) - } - if err != nil { - break - } - } - ss += *s + "" - b = []byte(ss) - case map[string]interface{}: - m := Map(v.(map[string]interface{})) - b, err = m.XmlIndent(prefix, indent, rt) - default: - err = mapToXmlIndent(true, s, rt, v, p) - b = []byte(*s) - } - - return b, err -} diff --git a/vendor/github.com/clbanning/mxj/atomFeedString.xml b/vendor/github.com/clbanning/mxj/atomFeedString.xml deleted file mode 100644 index 474575a4..00000000 --- a/vendor/github.com/clbanning/mxj/atomFeedString.xml +++ /dev/null @@ -1,54 +0,0 @@ - -Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub -2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a - An attempt at adding pubsubhubbub support to Rietveld. -http://code.google.com/p/pubsubhubbub -http://code.google.com/p/rietveld/issues/detail?id=155 - -The server side of the protocol is trivial: - 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all - feeds that will be pubsubhubbubbed. - 2. every time one of those feeds changes, tell the hub - with a simple POST request. - -I have tested this by adding debug prints to a local hub -server and checking that the server got the right publish -requests. - -I can&#39;t quite get the server to work, but I think the bug -is not in my code. I think that the server expects to be -able to grab the feed and see the feed&#39;s actual URL in -the link rel=&quot;self&quot;, but the default value for that drops -the :port from the URL, and I cannot for the life of me -figure out how to get the Atom generator deep inside -django not to do that, or even where it is doing that, -or even what code is running to generate the Atom feed. -(I thought I knew but I added some assert False statements -and it kept running!) - -Ignoring that particular problem, I would appreciate -feedback on the right way to get the two values at -the top of feeds.py marked NOTE(rsc). - - -rietveld: correct tab handling -2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a - This fixes the buggy tab rendering that can be seen at -http://codereview.appspot.com/116075/diff/1/2 - -The fundamental problem was that the tab code was -not being told what column the text began in, so it -didn&#39;t know where to put the tab stops. 
Another problem -was that some of the code assumed that string byte -offsets were the same as column offsets, which is only -true if there are no tabs. - -In the process of fixing this, I cleaned up the arguments -to Fold and ExpandTabs and renamed them Break and -_ExpandTabs so that I could be sure that I found all the -call sites. I also wanted to verify that ExpandTabs was -not being used from outside intra_region_diff.py. - - - ` - diff --git a/vendor/github.com/clbanning/mxj/doc.go b/vendor/github.com/clbanning/mxj/doc.go deleted file mode 100644 index 8ed79a5a..00000000 --- a/vendor/github.com/clbanning/mxj/doc.go +++ /dev/null @@ -1,134 +0,0 @@ -// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. -// Copyright 2012-2015, 2018 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -/* -Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards. - -mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package. The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them. - -Note: this library was designed for processing ad hoc anonymous messages. Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly. - -Related Packages: - checkxml: github.com/clbanning/checkxml provides functions for validating XML data. - -Notes: - 2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc. - 2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps. - 2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package. - 2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing. - 2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data. - 2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods. - 2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag(). - 2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc. - 2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix(). - 2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable. - 2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars(). - 2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf". - To cast them to float64, first set flag with CastNanInf(true). - 2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure. - 2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization. - 2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM). - 2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding. - 2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML. 
- -SUMMARY - - type Map map[string]interface{} - - Create a Map value, 'mv', from any map[string]interface{} value, 'v': - mv := Map(v) - - Unmarshal / marshal XML as a Map value, 'mv': - mv, err := NewMapXml(xmlValue) // unmarshal - xmlValue, err := mv.Xml() // marshal - - Unmarshal XML from an io.Reader as a Map value, 'mv': - mv, err := NewMapXmlReader(xmlReader) // repeated calls, as with an os.File Reader, will process stream - mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded - - Marshal Map value, 'mv', to an XML Writer (io.Writer): - err := mv.XmlWriter(xmlWriter) - raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter - - Also, for prettified output: - xmlValue, err := mv.XmlIndent(prefix, indent, ...) - err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...) - raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...) - - Bulk process XML with error handling (note: handlers must return a boolean value): - err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error)) - err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte)) - - Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader. - - There are comparable functions and methods for JSON processing. - - Arbitrary structure values can be decoded to / encoded from Map values: - mv, err := NewMapStruct(structVal) - err := mv.Struct(structPointer) - - To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON - or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then: - paths := mv.PathsForKey(key) - path := mv.PathForKeyShortest(key) - values, err := mv.ValuesForKey(key, subkeys) - values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays. - count, err := mv.UpdateValuesForPath(newVal, path, subkeys) - - Get everything at once, irrespective of path depth: - leafnodes := mv.LeafNodes() - leafvalues := mv.LeafValues() - - A new Map with whatever keys are desired can be created from the current Map and then encoded in XML - or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.) - newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N") - newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go" - newXml, err := newMap.Xml() // for example - newJson, err := newMap.Json() // ditto - -XML PARSING CONVENTIONS - - Using NewMapXml() - - - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`, - to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or - `SetAttrPrefix()`.) - - If the element is a simple element and has attributes, the element value - is given the key `#text` for its `map[string]interface{}` representation. (See - the 'atomFeedString.xml' test data, below.) - - XML comments, directives, and process instructions are ignored. - - If CoerceKeysToLower() has been called, then the resultant keys will be lower case. - - Using NewMapXmlSeq() - - - Attributes are parsed to `map["#attr"]map[]map[string]interface{}`values - where the `` value has "#text" and "#seq" keys - the "#text" key holds the - value for ``. - - All elements, except for the root, have a "#seq" key. 
- - Comments, directives, and process instructions are unmarshalled into the Map using the - keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more - specifics.) - - Name space syntax is preserved: - - something parses to map["ns:key"]interface{}{"something"} - - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"} - - Both - - - By default, "Nan", "Inf", and "-Inf" values are not cast to float64. If you want them - to be cast, set a flag to cast them using CastNanInf(true). - -XML ENCODING CONVENTIONS - - - 'nil' Map values, which may represent 'null' JSON values, are encoded as "". - NOTE: the operation is not symmetric as "" elements are decoded as 'tag:""' Map values, - which, then, encode in JSON as '"tag":""' values.. - - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go - randomizes the walk through map[string]interface{} values.) If you plan to re-encode the - Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and - mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when - working with the Map representation. - -*/ -package mxj diff --git a/vendor/github.com/clbanning/mxj/escapechars.go b/vendor/github.com/clbanning/mxj/escapechars.go deleted file mode 100644 index bee0442c..00000000 --- a/vendor/github.com/clbanning/mxj/escapechars.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -package mxj - -import ( - "bytes" -) - -var xmlEscapeChars bool - -// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values. -// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is -// then '&' will be re-escaped as '&amp;'. -// -/* - The values are: - " " - ' ' - < < - > > - & & -*/ -func XMLEscapeChars(b bool) { - xmlEscapeChars = b -} - -// Scan for '&' first, since 's' may contain "&" that is parsed to "&amp;" -// - or "<" that is parsed to "&lt;". -var escapechars = [][2][]byte{ - {[]byte(`&`), []byte(`&`)}, - {[]byte(`<`), []byte(`<`)}, - {[]byte(`>`), []byte(`>`)}, - {[]byte(`"`), []byte(`"`)}, - {[]byte(`'`), []byte(`'`)}, -} - -func escapeChars(s string) string { - if len(s) == 0 { - return s - } - - b := []byte(s) - for _, v := range escapechars { - n := bytes.Count(b, v[0]) - if n == 0 { - continue - } - b = bytes.Replace(b, v[0], v[1], n) - } - return string(b) -} - diff --git a/vendor/github.com/clbanning/mxj/exists.go b/vendor/github.com/clbanning/mxj/exists.go deleted file mode 100644 index 2fb3084b..00000000 --- a/vendor/github.com/clbanning/mxj/exists.go +++ /dev/null @@ -1,7 +0,0 @@ -package mxj - -// Checks whether the path exists -func (mv Map) Exists(path string, subkeys ...string) bool { - v, err := mv.ValuesForPath(path, subkeys...) - return err == nil && len(v) > 0 -} diff --git a/vendor/github.com/clbanning/mxj/files.go b/vendor/github.com/clbanning/mxj/files.go deleted file mode 100644 index 27e06e1e..00000000 --- a/vendor/github.com/clbanning/mxj/files.go +++ /dev/null @@ -1,287 +0,0 @@ -package mxj - -import ( - "fmt" - "io" - "os" -) - -type Maps []Map - -func NewMaps() Maps { - return make(Maps, 0) -} - -type MapRaw struct { - M Map - R []byte -} - -// NewMapsFromXmlFile - creates an array from a file of JSON values. 
-func NewMapsFromJsonFile(name string) (Maps, error) { - fi, err := os.Stat(name) - if err != nil { - return nil, err - } - if !fi.Mode().IsRegular() { - return nil, fmt.Errorf("file %s is not a regular file", name) - } - - fh, err := os.Open(name) - if err != nil { - return nil, err - } - defer fh.Close() - - am := make([]Map, 0) - for { - m, raw, err := NewMapJsonReaderRaw(fh) - if err != nil && err != io.EOF { - return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) - } - if len(m) > 0 { - am = append(am, m) - } - if err == io.EOF { - break - } - } - return am, nil -} - -// ReadMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values. -func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) { - fi, err := os.Stat(name) - if err != nil { - return nil, err - } - if !fi.Mode().IsRegular() { - return nil, fmt.Errorf("file %s is not a regular file", name) - } - - fh, err := os.Open(name) - if err != nil { - return nil, err - } - defer fh.Close() - - am := make([]MapRaw, 0) - for { - mr := new(MapRaw) - mr.M, mr.R, err = NewMapJsonReaderRaw(fh) - if err != nil && err != io.EOF { - return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) - } - if len(mr.M) > 0 { - am = append(am, *mr) - } - if err == io.EOF { - break - } - } - return am, nil -} - -// NewMapsFromXmlFile - creates an array from a file of XML values. -func NewMapsFromXmlFile(name string) (Maps, error) { - fi, err := os.Stat(name) - if err != nil { - return nil, err - } - if !fi.Mode().IsRegular() { - return nil, fmt.Errorf("file %s is not a regular file", name) - } - - fh, err := os.Open(name) - if err != nil { - return nil, err - } - defer fh.Close() - - am := make([]Map, 0) - for { - m, raw, err := NewMapXmlReaderRaw(fh) - if err != nil && err != io.EOF { - return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) - } - if len(m) > 0 { - am = append(am, m) - } - if err == io.EOF { - break - } - } - return am, nil -} - -// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values. -// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw(). -// It is slow at parsing a file from disk and is intended for relatively small utility files. -func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) { - fi, err := os.Stat(name) - if err != nil { - return nil, err - } - if !fi.Mode().IsRegular() { - return nil, fmt.Errorf("file %s is not a regular file", name) - } - - fh, err := os.Open(name) - if err != nil { - return nil, err - } - defer fh.Close() - - am := make([]MapRaw, 0) - for { - mr := new(MapRaw) - mr.M, mr.R, err = NewMapXmlReaderRaw(fh) - if err != nil && err != io.EOF { - return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) - } - if len(mr.M) > 0 { - am = append(am, *mr) - } - if err == io.EOF { - break - } - } - return am, nil -} - -// ------------------------ Maps writing ------------------------- -// These are handy-dandy methods for dumping configuration data, etc. 
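A sketch of how the file helpers above might be used to slurp a file holding several concatenated JSON objects into a Maps value; "records.json" is a hypothetical file name.

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	// Each JSON object in the file becomes one Map in the returned slice.
	mvs, err := mxj.NewMapsFromJsonFile("records.json")
	if err != nil {
		panic(err)
	}
	for i, mv := range mvs {
		fmt.Printf("record %d: %v\n", i, map[string]interface{}(mv))
	}
}
```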
- -// JsonString - analogous to mv.Json() -func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) { - var s string - for _, v := range mvs { - j, err := v.Json() - if err != nil { - return s, err - } - s += string(j) - } - return s, nil -} - -// JsonStringIndent - analogous to mv.JsonIndent() -func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) { - var s string - var haveFirst bool - for _, v := range mvs { - j, err := v.JsonIndent(prefix, indent) - if err != nil { - return s, err - } - if haveFirst { - s += "\n" - } else { - haveFirst = true - } - s += string(j) - } - return s, nil -} - -// XmlString - analogous to mv.Xml() -func (mvs Maps) XmlString() (string, error) { - var s string - for _, v := range mvs { - x, err := v.Xml() - if err != nil { - return s, err - } - s += string(x) - } - return s, nil -} - -// XmlStringIndent - analogous to mv.XmlIndent() -func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) { - var s string - for _, v := range mvs { - x, err := v.XmlIndent(prefix, indent) - if err != nil { - return s, err - } - s += string(x) - } - return s, nil -} - -// JsonFile - write Maps to named file as JSON -// Note: the file will be created, if necessary; if it exists it will be truncated. -// If you need to append to a file, open it and use JsonWriter method. -func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error { - var encoding bool - if len(safeEncoding) == 1 { - encoding = safeEncoding[0] - } - s, err := mvs.JsonString(encoding) - if err != nil { - return err - } - fh, err := os.Create(file) - if err != nil { - return err - } - defer fh.Close() - fh.WriteString(s) - return nil -} - -// JsonFileIndent - write Maps to named file as pretty JSON -// Note: the file will be created, if necessary; if it exists it will be truncated. -// If you need to append to a file, open it and use JsonIndentWriter method. -func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error { - var encoding bool - if len(safeEncoding) == 1 { - encoding = safeEncoding[0] - } - s, err := mvs.JsonStringIndent(prefix, indent, encoding) - if err != nil { - return err - } - fh, err := os.Create(file) - if err != nil { - return err - } - defer fh.Close() - fh.WriteString(s) - return nil -} - -// XmlFile - write Maps to named file as XML -// Note: the file will be created, if necessary; if it exists it will be truncated. -// If you need to append to a file, open it and use XmlWriter method. -func (mvs Maps) XmlFile(file string) error { - s, err := mvs.XmlString() - if err != nil { - return err - } - fh, err := os.Create(file) - if err != nil { - return err - } - defer fh.Close() - fh.WriteString(s) - return nil -} - -// XmlFileIndent - write Maps to named file as pretty XML -// Note: the file will be created,if necessary; if it exists it will be truncated. -// If you need to append to a file, open it and use XmlIndentWriter method. 
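And the write-side counterpart, a small sketch that dumps a Maps collection to disk with JsonFile; the file name is illustrative and, as noted above, the file is created or truncated.

```go
package main

import "github.com/clbanning/mxj"

func main() {
	mvs := mxj.NewMaps()
	mvs = append(mvs,
		mxj.Map{"config": map[string]interface{}{"debug": true}},
		mxj.Map{"config": map[string]interface{}{"debug": false}},
	)

	// Serialize every Map in the collection to one JSON file.
	if err := mvs.JsonFile("configs.json"); err != nil {
		panic(err)
	}
}
```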
-func (mvs Maps) XmlFileIndent(file, prefix, indent string) error { - s, err := mvs.XmlStringIndent(prefix, indent) - if err != nil { - return err - } - fh, err := os.Create(file) - if err != nil { - return err - } - defer fh.Close() - fh.WriteString(s) - return nil -} diff --git a/vendor/github.com/clbanning/mxj/files_test.badjson b/vendor/github.com/clbanning/mxj/files_test.badjson deleted file mode 100644 index d1872004..00000000 --- a/vendor/github.com/clbanning/mxj/files_test.badjson +++ /dev/null @@ -1,2 +0,0 @@ -{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } -{ "with":"some", "bad":JSON, "in":"it" } diff --git a/vendor/github.com/clbanning/mxj/files_test.badxml b/vendor/github.com/clbanning/mxj/files_test.badxml deleted file mode 100644 index 4736ef97..00000000 --- a/vendor/github.com/clbanning/mxj/files_test.badxml +++ /dev/null @@ -1,9 +0,0 @@ - - test - for files.go - - - some - doc - test case - diff --git a/vendor/github.com/clbanning/mxj/files_test.json b/vendor/github.com/clbanning/mxj/files_test.json deleted file mode 100644 index e9a3ddf4..00000000 --- a/vendor/github.com/clbanning/mxj/files_test.json +++ /dev/null @@ -1,2 +0,0 @@ -{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } -{ "with":"just", "two":2, "JSON":"values", "true":true } diff --git a/vendor/github.com/clbanning/mxj/files_test.xml b/vendor/github.com/clbanning/mxj/files_test.xml deleted file mode 100644 index 65cf021f..00000000 --- a/vendor/github.com/clbanning/mxj/files_test.xml +++ /dev/null @@ -1,9 +0,0 @@ - - test - for files.go - - - some - doc - test case - diff --git a/vendor/github.com/clbanning/mxj/files_test_dup.json b/vendor/github.com/clbanning/mxj/files_test_dup.json deleted file mode 100644 index 2becb6a4..00000000 --- a/vendor/github.com/clbanning/mxj/files_test_dup.json +++ /dev/null @@ -1 +0,0 @@ -{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_dup.xml b/vendor/github.com/clbanning/mxj/files_test_dup.xml deleted file mode 100644 index f68d22e2..00000000 --- a/vendor/github.com/clbanning/mxj/files_test_dup.xml +++ /dev/null @@ -1 +0,0 @@ -for files.gotestdoctest casesome \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_indent.json b/vendor/github.com/clbanning/mxj/files_test_indent.json deleted file mode 100644 index 6fde1563..00000000 --- a/vendor/github.com/clbanning/mxj/files_test_indent.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "a": "test", - "file": "for", - "files_test.go": "case", - "this": "is" -} -{ - "JSON": "values", - "true": true, - "two": 2, - "with": "just" -} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/files_test_indent.xml b/vendor/github.com/clbanning/mxj/files_test_indent.xml deleted file mode 100644 index 8c91a1dc..00000000 --- a/vendor/github.com/clbanning/mxj/files_test_indent.xml +++ /dev/null @@ -1,8 +0,0 @@ - - for files.go - test - - doc - test case - some - \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/gob.go b/vendor/github.com/clbanning/mxj/gob.go deleted file mode 100644 index d56c2fd6..00000000 --- a/vendor/github.com/clbanning/mxj/gob.go +++ /dev/null @@ -1,35 +0,0 @@ -// gob.go - Encode/Decode a Map into a gob object. 
- -package mxj - -import ( - "bytes" - "encoding/gob" -) - -// NewMapGob returns a Map value for a gob object that has been -// encoded from a map[string]interface{} (or compatible type) value. -// It is intended to provide symmetric handling of Maps that have -// been encoded using mv.Gob. -func NewMapGob(gobj []byte) (Map, error) { - m := make(map[string]interface{}, 0) - if len(gobj) == 0 { - return m, nil - } - r := bytes.NewReader(gobj) - dec := gob.NewDecoder(r) - if err := dec.Decode(&m); err != nil { - return m, err - } - return m, nil -} - -// Gob returns a gob-encoded value for the Map 'mv'. -func (mv Map) Gob() ([]byte, error) { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - if err := enc.Encode(map[string]interface{}(mv)); err != nil { - return nil, err - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/clbanning/mxj/json.go b/vendor/github.com/clbanning/mxj/json.go deleted file mode 100644 index eb2c05a1..00000000 --- a/vendor/github.com/clbanning/mxj/json.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2012-2014 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -package mxj - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "time" -) - -// ------------------------------ write JSON ----------------------- - -// Just a wrapper on json.Marshal. -// If option safeEncoding is'true' then safe encoding of '<', '>' and '&' -// is preserved. (see encoding/json#Marshal, encoding/json#Encode) -func (mv Map) Json(safeEncoding ...bool) ([]byte, error) { - var s bool - if len(safeEncoding) == 1 { - s = safeEncoding[0] - } - - b, err := json.Marshal(mv) - - if !s { - b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) - b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) - b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) - } - return b, err -} - -// Just a wrapper on json.MarshalIndent. -// If option safeEncoding is'true' then safe encoding of '<' , '>' and '&' -// is preserved. (see encoding/json#Marshal, encoding/json#Encode) -func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) { - var s bool - if len(safeEncoding) == 1 { - s = safeEncoding[0] - } - - b, err := json.MarshalIndent(mv, prefix, indent) - if !s { - b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) - b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) - b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) - } - return b, err -} - -// The following implementation is provided for symmetry with NewMapJsonReader[Raw] -// The names will also provide a key for the number of return arguments. - -// Writes the Map as JSON on the Writer. -// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. -func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error { - b, err := mv.Json(safeEncoding...) - if err != nil { - return err - } - - _, err = jsonWriter.Write(b) - return err -} - -// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written. -// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. -func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) { - b, err := mv.Json(safeEncoding...) - if err != nil { - return b, err - } - - _, err = jsonWriter.Write(b) - return b, err -} - -// Writes the Map as pretty JSON on the Writer. -// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. 
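A possible round trip through the gob helpers above. Note that encoding/gob restricts which concrete types may sit behind interface{} values, so this sketch sticks to basic types; the keys and values are invented.

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	mv := mxj.Map{"host": "localhost", "port": 8080, "tls": true}

	b, err := mv.Gob() // encode the Map as a gob object
	if err != nil {
		panic(err)
	}

	mv2, err := mxj.NewMapGob(b) // decode it back into a Map
	if err != nil {
		panic(err)
	}
	fmt.Println(mv2["host"], mv2["port"], mv2["tls"])
}
```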
-func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error { - b, err := mv.JsonIndent(prefix, indent, safeEncoding...) - if err != nil { - return err - } - - _, err = jsonWriter.Write(b) - return err -} - -// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written. -// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. -func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) { - b, err := mv.JsonIndent(prefix, indent, safeEncoding...) - if err != nil { - return b, err - } - - _, err = jsonWriter.Write(b) - return b, err -} - -// --------------------------- read JSON ----------------------------- - -// Decode numericvalues as json.Number type Map values - see encoding/json#Number. -// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(), -// etc.; it does not affect NewMapXml(), etc. The XML encoders mv.Xml() and mv.XmlIndent() -// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number -// value types and the resulting Map can be correctly encoded into a XML object. -var JsonUseNumber bool - -// Just a wrapper on json.Unmarshal -// Converting JSON to XML is a simple as: -// ... -// mapVal, merr := mxj.NewMapJson(jsonVal) -// if merr != nil { -// // handle error -// } -// xmlVal, xerr := mapVal.Xml() -// if xerr != nil { -// // handle error -// } -// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}], -// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map. -// See mxj/j2x/j2x_test.go. -func NewMapJson(jsonVal []byte) (Map, error) { - // empty or nil begets empty - if len(jsonVal) == 0 { - m := make(map[string]interface{}, 0) - return m, nil - } - // handle a goofy case ... - if jsonVal[0] == '[' { - jsonVal = []byte(`{"object":` + string(jsonVal) + `}`) - } - m := make(map[string]interface{}) - // err := json.Unmarshal(jsonVal, &m) - buf := bytes.NewReader(jsonVal) - dec := json.NewDecoder(buf) - if JsonUseNumber { - dec.UseNumber() - } - err := dec.Decode(&m) - return m, err -} - -// Retrieve a Map value from an io.Reader. -// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an -// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte -// value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal -// a JSON object. -func NewMapJsonReader(jsonReader io.Reader) (Map, error) { - jb, err := getJson(jsonReader) - if err != nil || len(*jb) == 0 { - return nil, err - } - - // Unmarshal the 'presumed' JSON string - return NewMapJson(*jb) -} - -// Retrieve a Map value and raw JSON - []byte - from an io.Reader. -// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an -// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte -// value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal -// a JSON object and retrieve the raw JSON in a single call. 
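The comment on NewMapJson above sketches JSON-to-XML conversion; spelled out as a complete program (the sample JSON is invented for the example):

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	jsonVal := []byte(`{"book":{"author":"William T. Gaddis","title":"The Recognitions"}}`)

	mv, err := mxj.NewMapJson(jsonVal)
	if err != nil {
		panic(err) // handle error
	}
	xmlVal, err := mv.Xml()
	if err != nil {
		panic(err) // handle error
	}
	fmt.Println(string(xmlVal))
}
```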
-func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) { - jb, err := getJson(jsonReader) - if err != nil || len(*jb) == 0 { - return nil, *jb, err - } - - // Unmarshal the 'presumed' JSON string - m, merr := NewMapJson(*jb) - return m, *jb, merr -} - -// Pull the next JSON string off the stream: just read from first '{' to its closing '}'. -// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package. -func getJson(rdr io.Reader) (*[]byte, error) { - bval := make([]byte, 1) - jb := make([]byte, 0) - var inQuote, inJson bool - var parenCnt int - var previous byte - - // scan the input for a matched set of {...} - // json.Unmarshal will handle syntax checking. - for { - _, err := rdr.Read(bval) - if err != nil { - if err == io.EOF && inJson && parenCnt > 0 { - return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb)) - } - return &jb, err - } - switch bval[0] { - case '{': - if !inQuote { - parenCnt++ - inJson = true - } - case '}': - if !inQuote { - parenCnt-- - } - if parenCnt < 0 { - return nil, fmt.Errorf("closing } without opening {: %s", string(jb)) - } - case '"': - if inQuote { - if previous == '\\' { - break - } - inQuote = false - } else { - inQuote = true - } - case '\n', '\r', '\t', ' ': - if !inQuote { - continue - } - } - if inJson { - jb = append(jb, bval[0]) - if parenCnt == 0 { - break - } - } - previous = bval[0] - } - - return &jb, nil -} - -// ------------------------------- JSON Reader handler via Map values ----------------------- - -// Default poll delay to keep Handler from spinning on an open stream -// like sitting on os.Stdin waiting for imput. -var jhandlerPollInterval = time.Duration(1e6) - -// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader(). -// This avoids treating one or other as a special case and discussing the underlying stdlib logic. - -// Bulk process JSON using handlers that process a Map value. -// 'rdr' is an io.Reader for the JSON (stream). -// 'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing. -// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error. -// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. -// This means that you can stop reading the file on error or after processing a particular message. -// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. -func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { - var n int - for { - m, merr := NewMapJsonReader(jsonReader) - n++ - - // handle error condition with errhandler - if merr != nil && merr != io.EOF { - merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error()) - if ok := errHandler(merr); !ok { - // caused reader termination - return merr - } - continue - } - - // pass to maphandler - if len(m) != 0 { - if ok := mapHandler(m); !ok { - break - } - } else if merr != io.EOF { - <-time.After(jhandlerPollInterval) - } - - if merr == io.EOF { - break - } - } - return nil -} - -// Bulk process JSON using handlers that process a Map value and the raw JSON. -// 'rdr' is an io.Reader for the JSON (stream). -// 'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing. -// 'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error. 
-// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. -// This means that you can stop reading the file on error or after processing a particular message. -// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. -func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { - var n int - for { - m, raw, merr := NewMapJsonReaderRaw(jsonReader) - n++ - - // handle error condition with errhandler - if merr != nil && merr != io.EOF { - merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error()) - if ok := errHandler(merr, raw); !ok { - // caused reader termination - return merr - } - continue - } - - // pass to maphandler - if len(m) != 0 { - if ok := mapHandler(m, raw); !ok { - break - } - } else if merr != io.EOF { - <-time.After(jhandlerPollInterval) - } - - if merr == io.EOF { - break - } - } - return nil -} diff --git a/vendor/github.com/clbanning/mxj/keyvalues.go b/vendor/github.com/clbanning/mxj/keyvalues.go deleted file mode 100644 index 0b244c87..00000000 --- a/vendor/github.com/clbanning/mxj/keyvalues.go +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright 2012-2014 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -// keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters. - -package mxj - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// ----------------------------- get everything FOR a single key ------------------------- - -const ( - minArraySize = 32 -) - -var defaultArraySize int = minArraySize - -// Adjust the buffers for expected number of values to return from ValuesForKey() and ValuesForPath(). -// This can have the effect of significantly reducing memory allocation-copy functions for large data sets. -// Returns the initial buffer size. -func SetArraySize(size int) int { - if size > minArraySize { - defaultArraySize = size - } else { - defaultArraySize = minArraySize - } - return defaultArraySize -} - -// Return all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match. -// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*". -// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list. -// - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them. -// - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3". -// - If the 'key' refers to a list, then "key:value" could select a list member of the list. -// - The subkey can be wildcarded - "key:*" - to require that it's there with some value. -// - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an -// exclusion critera - e.g., "!author:William T. Gaddis". -// - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|". -func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) { - m := map[string]interface{}(mv) - var subKeyMap map[string]interface{} - if len(subkeys) > 0 { - var err error - subKeyMap, err = getSubKeyMap(subkeys...) 
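A minimal sketch of bulk-processing a JSON stream with HandleJsonReader as described above. Returning true from both handlers keeps the reader going; the in-memory stream stands in for an os.File or network connection.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/clbanning/mxj"
)

func main() {
	stream := strings.NewReader(`{"msg":"one"} {"msg":"two"} {"msg":"three"}`)

	mapHandler := func(mv mxj.Map) bool {
		fmt.Println("got:", mv["msg"])
		return true // keep reading
	}
	errHandler := func(err error) bool {
		fmt.Println("skipping bad message:", err)
		return true // keep reading
	}

	if err := mxj.HandleJsonReader(stream, mapHandler, errHandler); err != nil {
		panic(err)
	}
}
```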
- if err != nil { - return nil, err - } - } - - ret := make([]interface{}, 0, defaultArraySize) - var cnt int - hasKey(m, key, &ret, &cnt, subKeyMap) - return ret[:cnt], nil -} - -var KeyNotExistError = errors.New("Key does not exist") - -// ValueForKey is a wrapper on ValuesForKey. It returns the first member of []interface{}, if any. -// If there is no value, "nil, nil" is returned. -func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) { - vals, err := mv.ValuesForKey(key, subkeys...) - if err != nil { - return nil, err - } - if len(vals) == 0 { - return nil, KeyNotExistError - } - return vals[0], nil -} - -// hasKey - if the map 'key' exists append it to array -// if it doesn't do nothing except scan array and map values -func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) { - // func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) { - switch iv.(type) { - case map[string]interface{}: - vv := iv.(map[string]interface{}) - // see if the current value is of interest - if v, ok := vv[key]; ok { - switch v.(type) { - case map[string]interface{}: - if hasSubKeys(v, subkeys) { - *ret = append(*ret, v) - *cnt++ - } - case []interface{}: - for _, av := range v.([]interface{}) { - if hasSubKeys(av, subkeys) { - *ret = append(*ret, av) - *cnt++ - } - } - default: - if len(subkeys) == 0 { - *ret = append(*ret, v) - *cnt++ - } - } - } - - // wildcard case - if key == "*" { - for _, v := range vv { - switch v.(type) { - case map[string]interface{}: - if hasSubKeys(v, subkeys) { - *ret = append(*ret, v) - *cnt++ - } - case []interface{}: - for _, av := range v.([]interface{}) { - if hasSubKeys(av, subkeys) { - *ret = append(*ret, av) - *cnt++ - } - } - default: - if len(subkeys) == 0 { - *ret = append(*ret, v) - *cnt++ - } - } - } - } - - // scan the rest - for _, v := range vv { - hasKey(v, key, ret, cnt, subkeys) - } - case []interface{}: - for _, v := range iv.([]interface{}) { - hasKey(v, key, ret, cnt, subkeys) - } - } -} - -// ----------------------- get everything for a node in the Map --------------------------- - -// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.) -// 2014.04.28 - implementation note. -// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion -// of wildcards and unindexed arrays. Embedding such logic into valuesForKeyPath() would have made the -// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead. - -// Retrieve all values for a path from the Map. If len(returned_values) == 0, then no match. -// On error, the returned array is 'nil'. -// 'path' is a dot-separated path of key values. -// - If a node in the path is '*', then everything beyond is walked. -// - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" - -// even "*[2].*[0].field". -// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list. -// - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them. -// - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3". -// - If the 'path' refers to a list, then "tag:value" would return member of the list. -// - The subkey can be wildcarded - "key:*" - to require that it's there with some value. -// - If a subkey is preceeded with the '!' 
character, the key:value[:type] entry is treated as an -// exclusion critera - e.g., "!author:William T. Gaddis". -// - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|". -func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) { - // If there are no array indexes in path, use legacy ValuesForPath() logic. - if strings.Index(path, "[") < 0 { - return mv.oldValuesForPath(path, subkeys...) - } - - var subKeyMap map[string]interface{} - if len(subkeys) > 0 { - var err error - subKeyMap, err = getSubKeyMap(subkeys...) - if err != nil { - return nil, err - } - } - - keys, kerr := parsePath(path) - if kerr != nil { - return nil, kerr - } - - vals, verr := valuesForArray(keys, mv) - if verr != nil { - return nil, verr // Vals may be nil, but return empty array. - } - - // Need to handle subkeys ... only return members of vals that satisfy conditions. - retvals := make([]interface{}, 0) - for _, v := range vals { - if hasSubKeys(v, subKeyMap) { - retvals = append(retvals, v) - } - } - return retvals, nil -} - -func valuesForArray(keys []*key, m Map) ([]interface{}, error) { - var tmppath string - var haveFirst bool - var vals []interface{} - var verr error - - lastkey := len(keys) - 1 - for i := 0; i <= lastkey; i++ { - if !haveFirst { - tmppath = keys[i].name - haveFirst = true - } else { - tmppath += "." + keys[i].name - } - - // Look-ahead: explode wildcards and unindexed arrays. - // Need to handle un-indexed list recursively: - // e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]". - // Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ... - if !keys[i].isArray && i < lastkey && keys[i+1].isArray { - // Can't pass subkeys because we may not be at literal end of path. - vv, vverr := m.oldValuesForPath(tmppath) - if vverr != nil { - return nil, vverr - } - for _, v := range vv { - // See if we can walk the value. - am, ok := v.(map[string]interface{}) - if !ok { - continue - } - // Work the backend. - nvals, nvalserr := valuesForArray(keys[i+1:], Map(am)) - if nvalserr != nil { - return nil, nvalserr - } - vals = append(vals, nvals...) - } - break // have recursed the whole path - return - } - - if keys[i].isArray || i == lastkey { - // Don't pass subkeys because may not be at literal end of path. - vals, verr = m.oldValuesForPath(tmppath) - } else { - continue - } - if verr != nil { - return nil, verr - } - - if i == lastkey && !keys[i].isArray { - break - } - - // Now we're looking at an array - supposedly. - // Is index in range of vals? - if len(vals) <= keys[i].position { - vals = nil - break - } - - // Return the array member of interest, if at end of path. - if i == lastkey { - vals = vals[keys[i].position:(keys[i].position + 1)] - break - } - - // Extract the array member of interest. 
- am := vals[keys[i].position:(keys[i].position + 1)] - - // must be a map[string]interface{} value so we can keep walking the path - amm, ok := am[0].(map[string]interface{}) - if !ok { - vals = nil - break - } - - m = Map(amm) - haveFirst = false - } - - return vals, nil -} - -type key struct { - name string - isArray bool - position int -} - -func parsePath(s string) ([]*key, error) { - keys := strings.Split(s, ".") - - ret := make([]*key, 0) - - for i := 0; i < len(keys); i++ { - if keys[i] == "" { - continue - } - - newkey := new(key) - if strings.Index(keys[i], "[") < 0 { - newkey.name = keys[i] - ret = append(ret, newkey) - continue - } - - p := strings.Split(keys[i], "[") - newkey.name = p[0] - p = strings.Split(p[1], "]") - if p[0] == "" { // no right bracket - return nil, fmt.Errorf("no right bracket on key index: %s", keys[i]) - } - // convert p[0] to a int value - pos, nerr := strconv.ParseInt(p[0], 10, 32) - if nerr != nil { - return nil, fmt.Errorf("cannot convert index to int value: %s", p[0]) - } - newkey.position = int(pos) - newkey.isArray = true - ret = append(ret, newkey) - } - - return ret, nil -} - -// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'. -func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) { - m := map[string]interface{}(mv) - var subKeyMap map[string]interface{} - if len(subkeys) > 0 { - var err error - subKeyMap, err = getSubKeyMap(subkeys...) - if err != nil { - return nil, err - } - } - - keys := strings.Split(path, ".") - if keys[len(keys)-1] == "" { - keys = keys[:len(keys)-1] - } - ivals := make([]interface{}, 0, defaultArraySize) - var cnt int - valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap) - return ivals[:cnt], nil -} - -func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) { - lenKeys := len(keys) - - // load 'm' values into 'ret' - // expand any lists - if lenKeys == 0 { - switch m.(type) { - case map[string]interface{}: - if subkeys != nil { - if ok := hasSubKeys(m, subkeys); !ok { - return - } - } - *ret = append(*ret, m) - *cnt++ - case []interface{}: - for i, v := range m.([]interface{}) { - if subkeys != nil { - if ok := hasSubKeys(v, subkeys); !ok { - continue // only load list members with subkeys - } - } - *ret = append(*ret, (m.([]interface{}))[i]) - *cnt++ - } - default: - if subkeys != nil { - return // must be map[string]interface{} if there are subkeys - } - *ret = append(*ret, m) - *cnt++ - } - return - } - - // key of interest - key := keys[0] - switch key { - case "*": // wildcard - scan all values - switch m.(type) { - case map[string]interface{}: - for _, v := range m.(map[string]interface{}) { - // valuesForKeyPath(ret, v, keys[1:], subkeys) - valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) - } - case []interface{}: - for _, v := range m.([]interface{}) { - switch v.(type) { - // flatten out a list of maps - keys are processed - case map[string]interface{}: - for _, vv := range v.(map[string]interface{}) { - // valuesForKeyPath(ret, vv, keys[1:], subkeys) - valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) - } - default: - // valuesForKeyPath(ret, v, keys[1:], subkeys) - valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) - } - } - } - default: // key - must be map[string]interface{} - switch m.(type) { - case map[string]interface{}: - if v, ok := m.(map[string]interface{})[key]; ok { - // valuesForKeyPath(ret, v, keys[1:], subkeys) - valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) - } - 
case []interface{}: // may be buried in list - for _, v := range m.([]interface{}) { - switch v.(type) { - case map[string]interface{}: - if vv, ok := v.(map[string]interface{})[key]; ok { - // valuesForKeyPath(ret, vv, keys[1:], subkeys) - valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) - } - } - } - } - } -} - -// hasSubKeys() - interface{} equality works for string, float64, bool -// 'v' must be a map[string]interface{} value to have subkeys -// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard. -func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool { - if len(subkeys) == 0 { - return true - } - - switch v.(type) { - case map[string]interface{}: - // do all subKey name:value pairs match? - mv := v.(map[string]interface{}) - for skey, sval := range subkeys { - isNotKey := false - if skey[:1] == "!" { // a NOT-key - skey = skey[1:] - isNotKey = true - } - vv, ok := mv[skey] - if !ok { // key doesn't exist - if isNotKey { // key not there, but that's what we want - if kv, ok := sval.(string); ok && kv == "*" { - continue - } - } - return false - } - // wildcard check - if kv, ok := sval.(string); ok && kv == "*" { - if isNotKey { // key is there, and we don't want it - return false - } - continue - } - switch sval.(type) { - case string: - if s, ok := vv.(string); ok && s == sval.(string) { - if isNotKey { - return false - } - continue - } - case bool: - if b, ok := vv.(bool); ok && b == sval.(bool) { - if isNotKey { - return false - } - continue - } - case float64: - if f, ok := vv.(float64); ok && f == sval.(float64) { - if isNotKey { - return false - } - continue - } - } - // key there but didn't match subkey value - if isNotKey { // that's what we want - continue - } - return false - } - // all subkeys matched - return true - } - - // not a map[string]interface{} value, can't have subkeys - return false -} - -// Generate map of key:value entries as map[string]string. -// 'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'. -// If len(kv) == 0, the return is (nil, nil). -func getSubKeyMap(kv ...string) (map[string]interface{}, error) { - if len(kv) == 0 { - return nil, nil - } - m := make(map[string]interface{}, 0) - for _, v := range kv { - vv := strings.Split(v, fieldSep) - switch len(vv) { - case 2: - m[vv[0]] = interface{}(vv[1]) - case 3: - switch vv[2] { - case "string", "char", "text": - m[vv[0]] = interface{}(vv[1]) - case "bool", "boolean": - // ParseBool treats "1"==true & "0"==false - b, err := strconv.ParseBool(vv[1]) - if err != nil { - return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1]) - } - m[vv[0]] = interface{}(b) - case "float", "float64", "num", "number", "numeric": - f, err := strconv.ParseFloat(vv[1], 64) - if err != nil { - return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1]) - } - m[vv[0]] = interface{}(f) - default: - return nil, fmt.Errorf("unknown subkey conversion spec: %s", v) - } - default: - return nil, fmt.Errorf("unknown subkey spec: %s", v) - } - } - return m, nil -} - -// ------------------------------- END of valuesFor ... ---------------------------- - -// ----------------------- locate where a key value is in the tree ------------------- - -//----------------------------- find all paths to a key -------------------------------- - -// Get all paths through Map, 'mv', (in dot-notation) that terminate with the specified key. -// Results can be used with ValuesForPath. 
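Putting ValuesForKey / ValuesForPath and the subkey syntax described above together; the document, attribute values, and paths are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	xmlDoc := []byte(`<library>
  <book seq="1"><title>V.</title><author>Thomas Pynchon</author></book>
  <book seq="2"><title>The Recognitions</title><author>William Gaddis</author></book>
</library>`)

	mv, err := mxj.NewMapXml(xmlDoc)
	if err != nil {
		panic(err)
	}

	// Every title, regardless of depth.
	titles, _ := mv.ValuesForKey("title")
	fmt.Println(titles)

	// Only the book whose 'seq' attribute is "2" (attribute subkeys carry the '-' prefix).
	second, _ := mv.ValuesForPath("library.book", "-seq:2")
	fmt.Println(second)

	// Indexed array reference: the title of the first book only.
	first, _ := mv.ValuesForPath("library.book[0].title")
	fmt.Println(first)
}
```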
-func (mv Map) PathsForKey(key string) []string { - m := map[string]interface{}(mv) - breadbasket := make(map[string]bool, 0) - breadcrumbs := "" - - hasKeyPath(breadcrumbs, m, key, breadbasket) - if len(breadbasket) == 0 { - return nil - } - - // unpack map keys to return - res := make([]string, len(breadbasket)) - var i int - for k := range breadbasket { - res[i] = k - i++ - } - - return res -} - -// Extract the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'.. -// Paths are strings using dot-notation. -func (mv Map) PathForKeyShortest(key string) string { - paths := mv.PathsForKey(key) - - lp := len(paths) - if lp == 0 { - return "" - } - if lp == 1 { - return paths[0] - } - - shortest := paths[0] - shortestLen := len(strings.Split(shortest, ".")) - - for i := 1; i < len(paths); i++ { - vlen := len(strings.Split(paths[i], ".")) - if vlen < shortestLen { - shortest = paths[i] - shortestLen = vlen - } - } - - return shortest -} - -// hasKeyPath - if the map 'key' exists append it to KeyPath.path and increment KeyPath.depth -// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'. -func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) { - switch iv.(type) { - case map[string]interface{}: - vv := iv.(map[string]interface{}) - if _, ok := vv[key]; ok { - // create a new breadcrumb, intialized with the one we have - var nbc string - if crumbs == "" { - nbc = key - } else { - nbc = crumbs + "." + key - } - basket[nbc] = true - } - // walk on down the path, key could occur again at deeper node - for k, v := range vv { - // create a new breadcrumb, intialized with the one we have - var nbc string - if crumbs == "" { - nbc = k - } else { - nbc = crumbs + "." + k - } - hasKeyPath(nbc, v, key, basket) - } - case []interface{}: - // crumb-trail doesn't change, pass it on - for _, v := range iv.([]interface{}) { - hasKeyPath(crumbs, v, key, basket) - } - } -} - -var PathNotExistError = errors.New("Path does not exist") - -// ValueForPath wrap ValuesFor Path and returns the first value returned. -// If no value is found it returns 'nil' and PathNotExistError. -func (mv Map) ValueForPath(path string) (interface{}, error) { - vals, err := mv.ValuesForPath(path) - if err != nil { - return nil, err - } - if len(vals) == 0 { - return nil, PathNotExistError - } - return vals[0], nil -} - -// Returns the first found value for the path as a string. -func (mv Map) ValueForPathString(path string) (string, error) { - vals, err := mv.ValuesForPath(path) - if err != nil { - return "", err - } - if len(vals) == 0 { - return "", errors.New("ValueForPath: path not found") - } - val := vals[0] - switch str := val.(type) { - case string: - return str, nil - default: - return "", fmt.Errorf("ValueForPath: unsupported type: %T", str) - } -} - -// Returns the first found value for the path as a string. -// If the path is not found then it returns an empty string. 
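A short sketch of locating keys by path with PathsForKey / PathForKeyShortest and then fetching a single value with ValueForPath; the map literal is invented.

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	mv := mxj.Map{
		"doc": map[string]interface{}{
			"meta":    map[string]interface{}{"title": "outer title"},
			"chapter": map[string]interface{}{"section": map[string]interface{}{"title": "inner title"}},
		},
	}

	fmt.Println(mv.PathsForKey("title"))        // every dot-notation path ending in "title"
	fmt.Println(mv.PathForKeyShortest("title")) // doc.meta.title

	v, err := mv.ValueForPath("doc.meta.title")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // outer title
}
```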
-func (mv Map) ValueOrEmptyForPathString(path string) string { - str, _ := mv.ValueForPathString(path) - return str -} diff --git a/vendor/github.com/clbanning/mxj/leafnode.go b/vendor/github.com/clbanning/mxj/leafnode.go deleted file mode 100644 index cf413ebd..00000000 --- a/vendor/github.com/clbanning/mxj/leafnode.go +++ /dev/null @@ -1,112 +0,0 @@ -package mxj - -// leafnode.go - return leaf nodes with paths and values for the Map -// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw - -import ( - "strconv" - "strings" -) - -const ( - NoAttributes = true // suppress LeafNode values that are attributes -) - -// LeafNode - a terminal path value in a Map. -// For XML Map values it represents an attribute or simple element value - of type -// string unless Map was created using Cast flag. For JSON Map values it represents -// a string, numeric, boolean, or null value. -type LeafNode struct { - Path string // a dot-notation representation of the path with array subscripting - Value interface{} // the value at the path termination -} - -// LeafNodes - returns an array of all LeafNode values for the Map. -// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-') -// as well as the "#text" key for the associated simple element value. -// -// PrependAttrWithHypen(false) will result in attributes having .attr-name as -// terminal node in 'path' while the path for the element value, itself, will be -// the base path w/o "#text". -// -// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax -// rather than "[N]" syntax. -func (mv Map) LeafNodes(no_attr ...bool) []LeafNode { - var a bool - if len(no_attr) == 1 { - a = no_attr[0] - } - - l := make([]LeafNode, 0) - getLeafNodes("", "", map[string]interface{}(mv), &l, a) - return l -} - -func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) { - // if stripping attributes, then also strip "#text" key - if !noattr || node != "#text" { - if path != "" && node[:1] != "[" { - path += "." - } - path += node - } - switch mv.(type) { - case map[string]interface{}: - for k, v := range mv.(map[string]interface{}) { - // if noattr && k[:1] == "-" { - if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { - continue - } - getLeafNodes(path, k, v, l, noattr) - } - case []interface{}: - for i, v := range mv.([]interface{}) { - if useDotNotation { - getLeafNodes(path, strconv.Itoa(i), v, l, noattr) - } else { - getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr) - } - } - default: - // can't walk any further, so create leaf - n := LeafNode{path, mv} - *l = append(*l, n) - } -} - -// LeafPaths - all paths that terminate in LeafNode values. -func (mv Map) LeafPaths(no_attr ...bool) []string { - ln := mv.LeafNodes() - ss := make([]string, len(ln)) - for i := 0; i < len(ln); i++ { - ss[i] = ln[i].Path - } - return ss -} - -// LeafValues - all terminal values in the Map. -func (mv Map) LeafValues(no_attr ...bool) []interface{} { - ln := mv.LeafNodes() - vv := make([]interface{}, len(ln)) - for i := 0; i < len(ln); i++ { - vv[i] = ln[i].Value - } - return vv -} - -// ====================== utilities ====================== - -// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I -var useDotNotation bool - -// LeafUseDotNotation sets a flag that list members in LeafNode paths -// should be identified using ".N" syntax rather than the default "[N]" -// syntax. 
Calling LeafUseDotNotation with no arguments toggles the -// flag on/off; otherwise, the argument sets the flag value 'true'/'false'. -func LeafUseDotNotation(b ...bool) { - if len(b) == 0 { - useDotNotation = !useDotNotation - return - } - useDotNotation = b[0] -} diff --git a/vendor/github.com/clbanning/mxj/misc.go b/vendor/github.com/clbanning/mxj/misc.go deleted file mode 100644 index 5b4fab21..00000000 --- a/vendor/github.com/clbanning/mxj/misc.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2016 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -// misc.go - mimic functions (+others) called out in: -// https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ -// Primarily these methods let you retrive XML structure information. - -package mxj - -import ( - "fmt" - "sort" - "strings" -) - -// Return the root element of the Map. If there is not a single key in Map, -// then an error is returned. -func (mv Map) Root() (string, error) { - mm := map[string]interface{}(mv) - if len(mm) != 1 { - return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm)) - } - for k, _ := range mm { - return k, nil - } - return "", nil -} - -// If the path is an element with sub-elements, return a list of the sub-element -// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are prefixed with -// '-', a hyphen, are considered attributes; see m.Attributes(path). -func (mv Map) Elements(path string) ([]string, error) { - e, err := mv.ValueForPath(path) - if err != nil { - return nil, err - } - switch e.(type) { - case map[string]interface{}: - ee := e.(map[string]interface{}) - elems := make([]string, len(ee)) - var i int - for k, _ := range ee { - if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { - continue // skip attributes - } - elems[i] = k - i++ - } - elems = elems[:i] - // alphabetic sort keeps things tidy - sort.Strings(elems) - return elems, nil - } - return nil, fmt.Errorf("no elements for path: %s", path) -} - -// If the path is an element with attributes, return a list of the attribute -// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are not prefixed with -// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the -// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then -// there are no identifiable attributes. -func (mv Map) Attributes(path string) ([]string, error) { - a, err := mv.ValueForPath(path) - if err != nil { - return nil, err - } - switch a.(type) { - case map[string]interface{}: - aa := a.(map[string]interface{}) - attrs := make([]string, len(aa)) - var i int - for k, _ := range aa { - if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 { - continue // skip non-attributes - } - attrs[i] = k[len(attrPrefix):] - i++ - } - attrs = attrs[:i] - // alphabetic sort keeps things tidy - sort.Strings(attrs) - return attrs, nil - } - return nil, fmt.Errorf("no attributes for path: %s", path) -} diff --git a/vendor/github.com/clbanning/mxj/mxj.go b/vendor/github.com/clbanning/mxj/mxj.go deleted file mode 100644 index f0592f06..00000000 --- a/vendor/github.com/clbanning/mxj/mxj.go +++ /dev/null @@ -1,128 +0,0 @@ -// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. -// Copyright 2012-2014 Charles Banning. All rights reserved. 
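A sketch combining the structure-inspection helpers above, Root/Elements/Attributes from misc.go and LeafNodes from leafnode.go; the sample message is invented.

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	mv, err := mxj.NewMapXml([]byte(`<msg id="42"><to>Alice</to><body>hello</body></msg>`))
	if err != nil {
		panic(err)
	}

	root, _ := mv.Root()            // "msg"
	elems, _ := mv.Elements(root)   // ["body" "to"], alphabetically sorted
	attrs, _ := mv.Attributes(root) // ["id"]
	fmt.Println(root, elems, attrs)

	// Walk every terminal value with its dot-notation path.
	for _, ln := range mv.LeafNodes() {
		fmt.Printf("%s = %v\n", ln.Path, ln.Value)
	}
}
```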
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -package mxj - -import ( - "fmt" - "sort" -) - -const ( - Cast = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast) - SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding) -) - -type Map map[string]interface{} - -// Allocate a Map. -func New() Map { - m := make(map[string]interface{}, 0) - return m -} - -// Cast a Map to map[string]interface{} -func (mv Map) Old() map[string]interface{} { - return mv -} - -// Return a copy of mv as a newly allocated Map. If the Map only contains string, -// numeric, map[string]interface{}, and []interface{} values, then it can be thought -// of as a "deep copy." Copying a structure (or structure reference) value is subject -// to the noted restrictions. -// NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags -// then only public fields of the structure are in the new Map - and with -// keys that conform to any encoding tag instructions. The structure itself will -// be represented as a map[string]interface{} value. -func (mv Map) Copy() (Map, error) { - // this is the poor-man's deep copy - // not efficient, but it works - j, jerr := mv.Json() - // must handle, we don't know how mv got built - if jerr != nil { - return nil, jerr - } - return NewMapJson(j) -} - -// --------------- StringIndent ... from x2j.WriteMap ------------- - -// Pretty print a Map. -func (mv Map) StringIndent(offset ...int) string { - return writeMap(map[string]interface{}(mv), true, true, offset...) -} - -// Pretty print a Map without the value type information - just key:value entries. -func (mv Map) StringIndentNoTypeInfo(offset ...int) string { - return writeMap(map[string]interface{}(mv), false, true, offset...) -} - -// writeMap - dumps the map[string]interface{} for examination. -// 'typeInfo' causes value type to be printed. -// 'offset' is initial indentation count; typically: Write(m). -func writeMap(m interface{}, typeInfo, root bool, offset ...int) string { - var indent int - if len(offset) == 1 { - indent = offset[0] - } - - var s string - switch m.(type) { - case []interface{}: - if typeInfo { - s += "[[]interface{}]" - } - for _, v := range m.([]interface{}) { - s += "\n" - for i := 0; i < indent; i++ { - s += " " - } - s += writeMap(v, typeInfo, false, indent+1) - } - case map[string]interface{}: - list := make([][2]string, len(m.(map[string]interface{}))) - var n int - for k, v := range m.(map[string]interface{}) { - list[n][0] = k - list[n][1] = writeMap(v, typeInfo, false, indent+1) - n++ - } - sort.Sort(mapList(list)) - for _, v := range list { - if root { - root = false - } else { - s += "\n" - } - for i := 0; i < indent; i++ { - s += " " - } - s += v[0] + " : " + v[1] - } - default: - if typeInfo { - s += fmt.Sprintf("[%T] %+v", m, m) - } else { - s += fmt.Sprintf("%+v", m) - } - } - return s -} - -// ======================== utility =============== - -type mapList [][2]string - -func (ml mapList) Len() int { - return len(ml) -} - -func (ml mapList) Swap(i, j int) { - ml[i], ml[j] = ml[j], ml[i] -} - -func (ml mapList) Less(i, j int) bool { - return ml[i][0] <= ml[j][0] -} diff --git a/vendor/github.com/clbanning/mxj/newmap.go b/vendor/github.com/clbanning/mxj/newmap.go deleted file mode 100644 index b2939490..00000000 --- a/vendor/github.com/clbanning/mxj/newmap.go +++ /dev/null @@ -1,184 +0,0 @@ -// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. 
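And a small sketch of Copy and StringIndent from mxj.go above. Copy is a JSON round trip, so edits to the copy do not touch the original; the values are invented.

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	mv := mxj.Map{"server": map[string]interface{}{"host": "localhost", "port": 8080}}

	cp, err := mv.Copy()
	if err != nil {
		panic(err)
	}
	cp["server"].(map[string]interface{})["port"] = 9090

	fmt.Println(mv.StringIndent()) // original is unchanged
	fmt.Println(cp.StringIndent())
}
```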
-// Copyright 2012-2014, 2018 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -// remap.go - build a new Map from the current Map based on keyOld:keyNew mapppings -// keys can use dot-notation, keyOld can use wildcard, '*' -// -// Computational strategy - -// Using the key path - []string - traverse a new map[string]interface{} and -// insert the oldVal as the newVal when we arrive at the end of the path. -// If the type at the end is nil, then that is newVal -// If the type at the end is a singleton (string, float64, bool) an array is created. -// If the type at the end is an array, newVal is just appended. -// If the type at the end is a map, it is inserted if possible or the map value -// is converted into an array if necessary. - -package mxj - -import ( - "errors" - "strings" -) - -// (Map)NewMap - create a new Map from data in the current Map. -// 'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey' -// should be the value for 'newKey' in the returned Map. -// - 'oldKey' supports dot-notation as described for (Map)ValuesForPath() -// - 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays -// - "oldKey" is shorthand for the keypair value "oldKey:oldKey" -// - "oldKey:" and ":newKey" are invalid keypair values -// - if 'oldKey' does not exist in the current Map, it is not written to the new Map. -// "null" is not supported unless it is the current Map. -// - see newmap_test.go for several syntax examples -// - mv.NewMap() == mxj.New() -// -// NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc. -func (mv Map) NewMap(keypairs ...string) (Map, error) { - n := make(map[string]interface{}, 0) - if len(keypairs) == 0 { - return n, nil - } - - // loop through the pairs - var oldKey, newKey string - var path []string - for _, v := range keypairs { - if len(v) == 0 { - continue // just skip over empty keypair arguments - } - - // initialize oldKey, newKey and check - vv := strings.Split(v, ":") - if len(vv) > 2 { - return n, errors.New("oldKey:newKey keypair value not valid - " + v) - } - if len(vv) == 1 { - oldKey, newKey = vv[0], vv[0] - } else { - oldKey, newKey = vv[0], vv[1] - } - strings.TrimSpace(oldKey) - strings.TrimSpace(newKey) - if i := strings.Index(newKey, "*"); i > -1 { - return n, errors.New("newKey value cannot contain wildcard character - " + v) - } - if i := strings.Index(newKey, "["); i > -1 { - return n, errors.New("newKey value cannot contain indexed arrays - " + v) - } - if oldKey == "" || newKey == "" { - return n, errors.New("oldKey or newKey is not specified - " + v) - } - - // get oldKey value - oldVal, err := mv.ValuesForPath(oldKey) - if err != nil { - return n, err - } - if len(oldVal) == 0 { - continue // oldKey has no value, may not exist in mv - } - - // break down path - path = strings.Split(newKey, ".") - if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec - path = path[:len(path)-1] - } - - addNewVal(&n, path, oldVal) - } - - return n, nil -} - -// navigate 'n' to end of path and add val -func addNewVal(n *map[string]interface{}, path []string, val []interface{}) { - // newVal - either singleton or array - var newVal interface{} - if len(val) == 1 { - newVal = val[0] // is type interface{} - } else { - newVal = interface{}(val) - } - - // walk to the position of interest, create it if necessary - m := (*n) // initialize map walker - var k string // key for 
m - lp := len(path) - 1 // when to stop looking - for i := 0; i < len(path); i++ { - k = path[i] - if i == lp { - break - } - var nm map[string]interface{} // holds position of next-map - switch m[k].(type) { - case nil: // need a map for next node in path, so go there - nm = make(map[string]interface{}, 0) - m[k] = interface{}(nm) - m = m[k].(map[string]interface{}) - case map[string]interface{}: - // OK - got somewhere to walk to, go there - m = m[k].(map[string]interface{}) - case []interface{}: - // add a map and nm points to new map unless there's already - // a map in the array, then nm points there - // The placement of the next value in the array is dependent - // on the sequence of members - could land on a map or a nil - // value first. TODO: how to test this. - a := make([]interface{}, 0) - var foundmap bool - for _, vv := range m[k].([]interface{}) { - switch vv.(type) { - case nil: // doesn't appear that this occurs, need a test case - if foundmap { // use the first one in array - a = append(a, vv) - continue - } - nm = make(map[string]interface{}, 0) - a = append(a, interface{}(nm)) - foundmap = true - case map[string]interface{}: - if foundmap { // use the first one in array - a = append(a, vv) - continue - } - nm = vv.(map[string]interface{}) - a = append(a, vv) - foundmap = true - default: - a = append(a, vv) - } - } - // no map found in array - if !foundmap { - nm = make(map[string]interface{}, 0) - a = append(a, interface{}(nm)) - } - m[k] = interface{}(a) // must insert in map - m = nm - default: // it's a string, float, bool, etc. - aa := make([]interface{}, 0) - nm = make(map[string]interface{}, 0) - aa = append(aa, m[k], nm) - m[k] = interface{}(aa) - m = nm - } - } - - // value is nil, array or a singleton of some kind - // initially m.(type) == map[string]interface{} - v := m[k] - switch v.(type) { - case nil: // initialized - m[k] = newVal - case []interface{}: - a := m[k].([]interface{}) - a = append(a, newVal) - m[k] = interface{}(a) - default: // v exists:string, float64, bool, map[string]interface, etc. - a := make([]interface{}, 0) - a = append(a, v, newVal) - m[k] = interface{}(a) - } -} diff --git a/vendor/github.com/clbanning/mxj/readme.md b/vendor/github.com/clbanning/mxj/readme.md deleted file mode 100644 index 6bb21dca..00000000 --- a/vendor/github.com/clbanning/mxj/readme.md +++ /dev/null @@ -1,179 +0,0 @@ -

-<h2>mxj - to/from maps, XML and JSON</h2>
-Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards. - -mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j and mxj/j2x packages. - -

-<h4>Related Packages</h4>
- -https://github.com/clbanning/checkxml provides functions for validating XML data. - -

-<h4>Refactor Decoder - 2015.11.15</h4>
-For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant. I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that could be deployed on a Raspberry Pi. Now the decoder is comparable to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead relative to decoding to a structure value. As shown by: - - BenchmarkNewMapXml-4 100000 18043 ns/op - BenchmarkNewStructXml-4 100000 14892 ns/op - BenchmarkNewMapJson-4 300000 4633 ns/op - BenchmarkNewStructJson-4 300000 3427 ns/op - BenchmarkNewMapXmlBooks-4 20000 82850 ns/op - BenchmarkNewStructXmlBooks-4 20000 67822 ns/op - BenchmarkNewMapJsonBooks-4 100000 17222 ns/op - BenchmarkNewStructJsonBooks-4 100000 15309 ns/op - -

Notices

- - 2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc. - 2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps. - 2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package. - 2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing. - 2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods. - 2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag(). - 2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc. - 2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix(). - 2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable. - 2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars(). - 2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf". - To cast them to float64, first set flag with CastNanInf(true). - 2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure. - 2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization. - 2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM). - 2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq() - and mv.XmlSeq() / mv.XmlSeqIndent(). - 2015-05-20: New: mv.StringIndentNoTypeInfo(). - Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(), - mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo(). - 2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information. - (NOTE: PreserveXmlList() is similar and will be here soon.) - 2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag. - 2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML. - 2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references. - -

Basic Unmarshal XML to map[string]interface{}

-
type Map map[string]interface{}
- -Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v': -
mv := Map(v)
- -Unmarshal / marshal XML as a `Map` value, 'mv': -
mv, err := NewMapXml(xmlValue) // unmarshal
-xmlValue, err := mv.Xml()      // marshal
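
For illustration, here is a minimal end-to-end sketch of the two calls above; the sample document and the import path `github.com/clbanning/mxj` are assumptions made for this example:

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	xmlValue := []byte(`<doc><name>alice</name><age>27</age></doc>`)

	// unmarshal the XML doc into a Map (map[string]interface{})
	mv, err := mxj.NewMapXml(xmlValue)
	if err != nil {
		panic(err)
	}

	// marshal the Map back to XML
	out, err := mv.Xml()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```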
- -Unmarshal XML from an `io.Reader` as a `Map` value, 'mv': -
mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
-mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
- -Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`): -
err := mv.XmlWriter(xmlWriter)
-raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
- -Also, for prettified output: -
xmlValue, err := mv.XmlIndent(prefix, indent, ...)
-err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
-raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
- -Bulk process XML with error handling (note: handlers must return a boolean value): -
err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
-err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
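
A hedged sketch of the bulk-processing call; the XML stream and the handler bodies are invented, but the handler signatures match the declarations above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/clbanning/mxj"
)

func main() {
	// two XML docs on one reader; HandleXmlReader decodes them in sequence
	rdr := strings.NewReader(`<msg><id>1</id></msg><msg><id>2</id></msg>`)

	mapHandler := func(m mxj.Map) bool {
		fmt.Printf("decoded: %v\n", m)
		return true // keep reading
	}
	errHandler := func(err error) bool {
		fmt.Println("decode error:", err)
		return true // keep reading despite the error
	}

	if err := mxj.HandleXmlReader(rdr, mapHandler, errHandler); err != nil {
		panic(err)
	}
}
```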
- -Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`. - -There are comparable functions and methods for JSON processing. - -Arbitrary structure values can be decoded to / encoded from `Map` values: -
mv, err := NewMapStruct(structVal)
-err := mv.Struct(structPointer)
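
For the Map-to-struct direction, a small sketch; per struct.go further down in this diff, `mv.Struct()` round-trips through encoding/json and `NewMapStruct()` is deprecated. The `Person` type and its values are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

// Person is an illustrative target type; JSON tags drive the field mapping.
type Person struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	mv := mxj.Map{"name": "alice", "age": 27}

	var p Person
	// marshals mv to JSON, then unmarshals into p
	if err := mv.Struct(&p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p)
}
```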
- -

Extract / modify Map values

-To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON -or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then: -
paths := mv.PathsForKey(key)
-path := mv.PathForKeyShortest(key)
-values, err := mv.ValuesForKey(key, subkeys)
-values, err := mv.ValuesForPath(path, subkeys)
-count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
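
To make the path-based calls concrete, a sketch with an invented document; paths use dot-notation, and the optional subkeys use the `key:value` form described in updatevalues.go further down in this diff:

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	xmlValue := []byte(`
<library>
  <book><title>Go</title><author>alice</author></book>
  <book><title>XML</title><author>bob</author></book>
</library>`)

	mv, err := mxj.NewMapXml(xmlValue)
	if err != nil {
		panic(err)
	}

	// all title values along the path, regardless of which book they belong to
	titles, err := mv.ValuesForPath("library.book.title")
	if err != nil {
		panic(err)
	}
	fmt.Println(titles) // e.g. [Go XML]

	// change the title, but only for the book whose author is "bob"
	n, err := mv.UpdateValuesForPath("title:XML in Go", "library.book", "author:bob")
	if err != nil {
		panic(err)
	}
	fmt.Println("values updated:", n) // expected: 1
}
```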
- -Get everything at once, irrespective of path depth: -
leafnodes := mv.LeafNodes()
-leafvalues := mv.LeafValues()
- -A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML -or JSON. (Note: keys can use dot-notation.) -
newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
-newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
-newXml, err := newMap.Xml()   // for example
-newJson, err := newMap.Json() // ditto
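
A short sketch of reshaping a Map with `NewMap` and re-encoding it as JSON; the document and the `oldKey:newKey` spec are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	mv, err := mxj.NewMapXml([]byte(`<person><first>Ada</first><last>Lovelace</last></person>`))
	if err != nil {
		panic(err)
	}

	// keep only the first name, renaming it along the way (old key uses dot-notation)
	newMap, err := mv.NewMap("person.first:given")
	if err != nil {
		panic(err)
	}

	newJson, err := newMap.Json()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(newJson)) // expected: {"given":"Ada"}
}
```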
- -

Usage

- -The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj). - -Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions. - -

XML parsing conventions

- -Using NewMapXml() - - - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`, - to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or - `SetAttrPrefix()`.) - - If the element is a simple element and has attributes, the element value - is given the key `#text` for its `map[string]interface{}` representation. (See - the 'atomFeedString.xml' test data, below.) - - XML comments, directives, and process instructions are ignored. - - If CoerceKeysToLower() has been called, then the resultant keys will be lower case. - -Using NewMapXmlSeq() - - - Attributes are parsed to `map["#attr"]map[]map[string]interface{}`values - where the `` value has "#text" and "#seq" keys - the "#text" key holds the - value for ``. - - All elements, except for the root, have a "#seq" key. - - Comments, directives, and process instructions are unmarshalled into the Map using the - keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more - specifics.) - - Name space syntax is preserved: - - `something` parses to `map["ns:key"]interface{}{"something"}` - - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}` - -Both - - - By default, "Nan", "Inf", and "-Inf" values are not cast to float64. If you want them - to be cast, set a flag to cast them using CastNanInf(true). - -
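
To make the `NewMapXml()` conventions concrete, a minimal sketch with an invented document showing the default `-` attribute prefix and the `#text` key for a simple element that carries attributes:

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	doc := []byte(`<note id="42" lang="en">remember the milk</note>`)

	mv, err := mxj.NewMapXml(doc)
	if err != nil {
		panic(err)
	}

	// With the default settings this decodes as:
	//   map["note"] = map[string]interface{}{
	//       "-id":   "42",                  // attributes get the "-" prefix
	//       "-lang": "en",
	//       "#text": "remember the milk",   // value of a simple element that has attributes
	//   }
	fmt.Printf("%v\n", mv)
}
```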

XML encoding conventions

- - - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as ``. - NOTE: the operation is not symmetric as `` elements are decoded as `tag:""` `Map` values, - which, then, encode in JSON as `"tag":""` values. - - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go - randomizes the walk through map[string]interface{} values.) If you plan to re-encode the - Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and - mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when - working with the Map representation. - -
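
A short sketch of the nil / empty-element asymmetry noted above; the map literal is invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/clbanning/mxj"
)

func main() {
	mv := mxj.Map{"doc": map[string]interface{}{"empty": nil}}

	out, err := mv.Xml()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <doc><empty/></doc>

	// decoding that XML back yields "empty":"" (an empty string), not nil,
	// so the round trip is not symmetric
	back, err := mxj.NewMapXml(out)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", back)
}
```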

Running "go test"

-
-Because there are no guarantees on the order in which map elements are retrieved, the tests have been
-written for visual verification in most cases. One advantage is that you can easily use the
-output from running "go test" as examples of calling the various functions and methods.
-

Motivation

- -I make extensive use of JSON for messaging and typically unmarshal the messages into -`map[string]interface{}` values. This is easily done using `json.Unmarshal` from the -standard Go libraries. Unfortunately, many legacy solutions use structured -XML messages; in those environments the applications would have to be refactored to -interoperate with my components. - -The better solution is to just provide an alternative HTTP handler that receives -XML messages and parses it into a `map[string]interface{}` value and then reuse -all the JSON-based code. The Go `xml.Unmarshal()` function does not provide the same -option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote -a couple of small functions to fill this gap and released them as the x2j package. - -Over the next year and a half additional features were added, and the companion j2x -package was released to address XML encoding of arbitrary JSON and `map[string]interface{}` -values. As part of a refactoring of our production system and looking at how we had been -using the x2j and j2x packages we found that we rarely performed direct XML-to-JSON or -JSON-to_XML conversion and that working with the XML or JSON as `map[string]interface{}` -values was the primary value. Thus, everything was refactored into the mxj package. - diff --git a/vendor/github.com/clbanning/mxj/remove.go b/vendor/github.com/clbanning/mxj/remove.go deleted file mode 100644 index 8362ab17..00000000 --- a/vendor/github.com/clbanning/mxj/remove.go +++ /dev/null @@ -1,37 +0,0 @@ -package mxj - -import "strings" - -// Removes the path. -func (mv Map) Remove(path string) error { - m := map[string]interface{}(mv) - return remove(m, path) -} - -func remove(m interface{}, path string) error { - val, err := prevValueByPath(m, path) - if err != nil { - return err - } - - lastKey := lastKey(path) - delete(val, lastKey) - - return nil -} - -// returns the last key of the path. -// lastKey("a.b.c") would had returned "c" -func lastKey(path string) string { - keys := strings.Split(path, ".") - key := keys[len(keys)-1] - return key -} - -// returns the path without the last key -// parentPath("a.b.c") whould had returned "a.b" -func parentPath(path string) string { - keys := strings.Split(path, ".") - parentPath := strings.Join(keys[0:len(keys)-1], ".") - return parentPath -} diff --git a/vendor/github.com/clbanning/mxj/rename.go b/vendor/github.com/clbanning/mxj/rename.go deleted file mode 100644 index e95a9639..00000000 --- a/vendor/github.com/clbanning/mxj/rename.go +++ /dev/null @@ -1,54 +0,0 @@ -package mxj - -import ( - "errors" - "strings" -) - -// RenameKey renames a key in a Map. -// It works only for nested maps. It doesn't work for cases when it buried in a list. -func (mv Map) RenameKey(path string, newName string) error { - if !mv.Exists(path) { - return errors.New("RenameKey: path not found: " + path) - } - if mv.Exists(parentPath(path) + "." 
+ newName) { - return errors.New("RenameKey: key already exists: " + newName) - } - - m := map[string]interface{}(mv) - return renameKey(m, path, newName) -} - -func renameKey(m interface{}, path string, newName string) error { - val, err := prevValueByPath(m, path) - if err != nil { - return err - } - - oldName := lastKey(path) - val[newName] = val[oldName] - delete(val, oldName) - - return nil -} - -// returns a value which contains a last key in the path -// For example: prevValueByPath("a.b.c", {a{b{c: 3}}}) returns {c: 3} -func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) { - keys := strings.Split(path, ".") - - switch mValue := m.(type) { - case map[string]interface{}: - for key, value := range mValue { - if key == keys[0] { - if len(keys) == 1 { - return mValue, nil - } else { - // keep looking for the full path to the key - return prevValueByPath(value, strings.Join(keys[1:], ".")) - } - } - } - } - return nil, errors.New("prevValueByPath: didn't find path – " + path) -} diff --git a/vendor/github.com/clbanning/mxj/set.go b/vendor/github.com/clbanning/mxj/set.go deleted file mode 100644 index a297fc38..00000000 --- a/vendor/github.com/clbanning/mxj/set.go +++ /dev/null @@ -1,26 +0,0 @@ -package mxj - -import ( - "strings" -) - -// Sets the value for the path -func (mv Map) SetValueForPath(value interface{}, path string) error { - pathAry := strings.Split(path, ".") - parentPathAry := pathAry[0 : len(pathAry)-1] - parentPath := strings.Join(parentPathAry, ".") - - val, err := mv.ValueForPath(parentPath) - if err != nil { - return err - } - if val == nil { - return nil // we just ignore the request if there's no val - } - - key := pathAry[len(pathAry)-1] - cVal := val.(map[string]interface{}) - cVal[key] = value - - return nil -} diff --git a/vendor/github.com/clbanning/mxj/setfieldsep.go b/vendor/github.com/clbanning/mxj/setfieldsep.go deleted file mode 100644 index b70715eb..00000000 --- a/vendor/github.com/clbanning/mxj/setfieldsep.go +++ /dev/null @@ -1,20 +0,0 @@ -package mxj - -// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862 -var fieldSep string = ":" - -// SetFieldSeparator changes the default field separator, ":", for the -// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments -// in mv.ValuesForKey and mv.ValuesForPath. -// -// E.g., if the newVal value is "http://blah/blah", setting the field separator -// to "|" will allow the newVal specification, "|http://blah/blah" to parse -// properly. If called with no argument or an empty string value, the field -// separator is set to the default, ":". -func SetFieldSeparator(s ...string) { - if len(s) == 0 || s[0] == "" { - fieldSep = ":" // the default - return - } - fieldSep = s[0] -} diff --git a/vendor/github.com/clbanning/mxj/songtext.xml b/vendor/github.com/clbanning/mxj/songtext.xml deleted file mode 100644 index 8c0f2bec..00000000 --- a/vendor/github.com/clbanning/mxj/songtext.xml +++ /dev/null @@ -1,29 +0,0 @@ - - help me! 
- - - - Henry was a renegade - Didn't like to play it safe - One component at a time - There's got to be a better way - Oh, people came from miles around - Searching for a steady job - Welcome to the Motor Town - Booming like an atom bomb - - - Oh, Henry was the end of the story - Then everything went wrong - And we'll return it to its former glory - But it just takes so long - - - - It's going to take a long time - It's going to take it, but we'll make it one day - It's going to take a long time - It's going to take it, but we'll make it one day - - - diff --git a/vendor/github.com/clbanning/mxj/strict.go b/vendor/github.com/clbanning/mxj/strict.go deleted file mode 100644 index 1e769560..00000000 --- a/vendor/github.com/clbanning/mxj/strict.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -// strict.go actually addresses setting xml.Decoder attribute -// values. This'll let you parse non-standard XML. - -package mxj - -import ( - "encoding/xml" -) - -// CustomDecoder can be used to specify xml.Decoder attribute -// values, e.g., Strict:false, to be used. By default CustomDecoder -// is nil. If CustomeDecoder != nil, then mxj.XmlCharsetReader variable is -// ignored and must be set as part of the CustomDecoder value, if needed. -// Usage: -// mxj.CustomDecoder = &xml.Decoder{Strict:false} -var CustomDecoder *xml.Decoder - -// useCustomDecoder copy over public attributes from customDecoder -func useCustomDecoder(d *xml.Decoder) { - d.Strict = CustomDecoder.Strict - d.AutoClose = CustomDecoder.AutoClose - d.Entity = CustomDecoder.Entity - d.CharsetReader = CustomDecoder.CharsetReader - d.DefaultSpace = CustomDecoder.DefaultSpace -} - diff --git a/vendor/github.com/clbanning/mxj/struct.go b/vendor/github.com/clbanning/mxj/struct.go deleted file mode 100644 index 9be636cd..00000000 --- a/vendor/github.com/clbanning/mxj/struct.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2012-2017 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -package mxj - -import ( - "encoding/json" - "errors" - "reflect" - - // "github.com/fatih/structs" -) - -// Create a new Map value from a structure. Error returned if argument is not a structure. -// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map -// for handling of "structs" tags. - -// DEPRECATED - import github.com/fatih/structs and cast result of structs.Map to mxj.Map. -// import "github.com/fatih/structs" -// ... -// sm, err := structs.Map() -// if err != nil { -// // handle error -// } -// m := mxj.Map(sm) -// Alernatively uncomment the old source and import in struct.go. -func NewMapStruct(structVal interface{}) (Map, error) { - return nil, errors.New("deprecated - see package documentation") - /* - if !structs.IsStruct(structVal) { - return nil, errors.New("NewMapStruct() error: argument is not type Struct") - } - return structs.Map(structVal), nil - */ -} - -// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned -// if argument is not a pointer or if json.Unmarshal returns an error. -// json.Unmarshal structure encoding rules are followed to encode public structure fields. -func (mv Map) Struct(structPtr interface{}) error { - // should check that we're getting a pointer. 
- if reflect.ValueOf(structPtr).Kind() != reflect.Ptr { - return errors.New("mv.Struct() error: argument is not type Ptr") - } - - m := map[string]interface{}(mv) - j, err := json.Marshal(m) - if err != nil { - return err - } - - return json.Unmarshal(j, structPtr) -} diff --git a/vendor/github.com/clbanning/mxj/updatevalues.go b/vendor/github.com/clbanning/mxj/updatevalues.go deleted file mode 100644 index 46779f4f..00000000 --- a/vendor/github.com/clbanning/mxj/updatevalues.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2012-2014, 2017 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -// updatevalues.go - modify a value based on path and possibly sub-keys -// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values. - -package mxj - -import ( - "fmt" - "strconv" - "strings" -) - -// Update value based on path and possible sub-key values. -// A count of the number of values changed and any error are returned. -// If the count == 0, then no path (and subkeys) matched. -// 'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified -// or a string value "key:value[:type]" where type is "bool" or "num" to cast the value. -// 'path' is dot-notation list of keys to traverse; last key in path can be newVal key -// NOTE: 'path' spec does not currently support indexed array references. -// 'subkeys' are "key:value[:type]" entries that must match for path node -// The subkey can be wildcarded - "key:*" - to require that it's there with some value. -// If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an -// exclusion critera - e.g., "!author:William T. Gaddis". -// -// NOTES: -// 1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value. -// 2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key. -// 3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to an unused symbol, -// perhaps "|". -func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) { - m := map[string]interface{}(mv) - - // extract the subkeys - var subKeyMap map[string]interface{} - if len(subkeys) > 0 { - var err error - subKeyMap, err = getSubKeyMap(subkeys...) 
- if err != nil { - return 0, err - } - } - - // extract key and value from newVal - var key string - var val interface{} - switch newVal.(type) { - case map[string]interface{}, Map: - switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec) - case Map: - newVal = newVal.(Map).Old() - } - if len(newVal.(map[string]interface{})) != 1 { - return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal) - } - for key, val = range newVal.(map[string]interface{}) { - } - case string: // split it as a key:value pair - ss := strings.Split(newVal.(string), fieldSep) - n := len(ss) - if n < 2 || n > 3 { - return 0, fmt.Errorf("unknown newVal spec - %+v", newVal) - } - key = ss[0] - if n == 2 { - val = interface{}(ss[1]) - } else if n == 3 { - switch ss[2] { - case "bool", "boolean": - nv, err := strconv.ParseBool(ss[1]) - if err != nil { - return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal) - } - val = interface{}(nv) - case "num", "numeric", "float", "int": - nv, err := strconv.ParseFloat(ss[1], 64) - if err != nil { - return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal) - } - val = interface{}(nv) - default: - return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal) - } - } - default: - return 0, fmt.Errorf("invalid newVal type - %+v", newVal) - } - - // parse path - keys := strings.Split(path, ".") - - var count int - updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count) - - return count, nil -} - -// navigate the path -func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) { - // ----- at end node: looking at possible node to get 'key' ---- - if len(keys) == 1 { - updateValue(key, value, m, keys[0], subkeys, cnt) - return - } - - // ----- here we are navigating the path thru the penultimate node -------- - // key of interest is keys[0] - the next in the path - switch keys[0] { - case "*": // wildcard - scan all values - switch m.(type) { - case map[string]interface{}: - for _, v := range m.(map[string]interface{}) { - updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) - } - case []interface{}: - for _, v := range m.([]interface{}) { - switch v.(type) { - // flatten out a list of maps - keys are processed - case map[string]interface{}: - for _, vv := range v.(map[string]interface{}) { - updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) - } - default: - updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) - } - } - } - default: // key - must be map[string]interface{} - switch m.(type) { - case map[string]interface{}: - if v, ok := m.(map[string]interface{})[keys[0]]; ok { - updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) - } - case []interface{}: // may be buried in list - for _, v := range m.([]interface{}) { - switch v.(type) { - case map[string]interface{}: - if vv, ok := v.(map[string]interface{})[keys[0]]; ok { - updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) - } - } - } - } - } -} - -// change value if key and subkeys are present -func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) { - // there are two possible options for the value of 'keys0': map[string]interface, []interface{} - // and 'key' is a key in the map or is a key in a map in a list. 
- switch m.(type) { - case map[string]interface{}: // gotta have the last key - if keys0 == "*" { - for k := range m.(map[string]interface{}) { - updateValue(key, value, m, k, subkeys, cnt) - } - return - } - endVal, _ := m.(map[string]interface{})[keys0] - - // if newV key is the end of path, replace the value for path-end - // may be []interface{} - means replace just an entry w/ subkeys - // otherwise replace the keys0 value if subkeys are there - // NOTE: this will replace the subkeys, also - if key == keys0 { - switch endVal.(type) { - case map[string]interface{}: - if hasSubKeys(m, subkeys) { - (m.(map[string]interface{}))[keys0] = value - (*cnt)++ - } - case []interface{}: - // without subkeys can't select list member to modify - // so key:value spec is it ... - if hasSubKeys(m, subkeys) { - (m.(map[string]interface{}))[keys0] = value - (*cnt)++ - break - } - nv := make([]interface{}, 0) - var valmodified bool - for _, v := range endVal.([]interface{}) { - // check entry subkeys - if hasSubKeys(v, subkeys) { - // replace v with value - nv = append(nv, value) - valmodified = true - (*cnt)++ - continue - } - nv = append(nv, v) - } - if valmodified { - (m.(map[string]interface{}))[keys0] = interface{}(nv) - } - default: // anything else is a strict replacement - if hasSubKeys(m, subkeys) { - (m.(map[string]interface{}))[keys0] = value - (*cnt)++ - } - } - return - } - - // so value is for an element of endVal - // if endVal is a map then 'key' must be there w/ subkeys - // if endVal is a list then 'key' must be in a list member w/ subkeys - switch endVal.(type) { - case map[string]interface{}: - if !hasSubKeys(endVal, subkeys) { - return - } - if _, ok := (endVal.(map[string]interface{}))[key]; ok { - (endVal.(map[string]interface{}))[key] = value - (*cnt)++ - } - case []interface{}: // keys0 points to a list, check subkeys - for _, v := range endVal.([]interface{}) { - // got to be a map so we can replace value for 'key' - vv, vok := v.(map[string]interface{}) - if !vok { - continue - } - if _, ok := vv[key]; !ok { - continue - } - if !hasSubKeys(vv, subkeys) { - continue - } - vv[key] = value - (*cnt)++ - } - } - case []interface{}: // key may be in a list member - // don't need to handle keys0 == "*"; we're looking at everything, anyway. - for _, v := range m.([]interface{}) { - // only map values - we're looking for 'key' - mm, ok := v.(map[string]interface{}) - if !ok { - continue - } - if _, ok := mm[key]; !ok { - continue - } - if !hasSubKeys(mm, subkeys) { - continue - } - mm[key] = value - (*cnt)++ - } - } - - // return -} diff --git a/vendor/github.com/clbanning/mxj/xml.go b/vendor/github.com/clbanning/mxj/xml.go deleted file mode 100644 index fac0f1d3..00000000 --- a/vendor/github.com/clbanning/mxj/xml.go +++ /dev/null @@ -1,1139 +0,0 @@ -// Copyright 2012-2016 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -// xml.go - basically the core of X2j for map[string]interface{} values. -// NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter -// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages. - -package mxj - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -// ------------------- NewMapXml & NewMapXmlReader ... ------------------------- - -// If XmlCharsetReader != nil, it will be used to decode the XML, if required. 
-// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored; -// set the CustomDecoder attribute instead. -// import ( -// charset "code.google.com/p/go-charset/charset" -// github.com/clbanning/mxj -// ) -// ... -// mxj.XmlCharsetReader = charset.NewReader -// m, merr := mxj.NewMapXml(xmlValue) -var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error) - -// NewMapXml - convert a XML doc into a Map -// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().) -// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible. -// -// Converting XML to JSON is a simple as: -// ... -// mapVal, merr := mxj.NewMapXml(xmlVal) -// if merr != nil { -// // handle error -// } -// jsonVal, jerr := mapVal.Json() -// if jerr != nil { -// // handle error -// } -// -// NOTES: -// 1. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other -// extraneous xml.CharData will be ignored unless io.EOF is reached first. -// 2. If CoerceKeysToLower() has been called, then all key values will be lower case. -// 3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. -func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) { - var r bool - if len(cast) == 1 { - r = cast[0] - } - return xmlToMap(xmlVal, r) -} - -// Get next XML doc from an io.Reader as a Map value. Returns Map value. -// NOTES: -// 1. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other -// extraneous xml.CharData will be ignored unless io.EOF is reached first. -// 2. If CoerceKeysToLower() has been called, then all key values will be lower case. -// 3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. -func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) { - var r bool - if len(cast) == 1 { - r = cast[0] - } - - // We need to put an *os.File reader in a ByteReader or the xml.NewDecoder - // will wrap it in a bufio.Reader and seek on the file beyond where the - // xml.Decoder parses! - if _, ok := xmlReader.(io.ByteReader); !ok { - xmlReader = myByteReader(xmlReader) // see code at EOF - } - - // build the map - return xmlReaderToMap(xmlReader, r) -} - -// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML. -// NOTES: -// 1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte -// using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact. -// See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large -// data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body -// you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call. -// 2. The 'raw' return value may be larger than the XML text value. -// 3. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other -// extraneous xml.CharData will be ignored unless io.EOF is reached first. -// 4. If CoerceKeysToLower() has been called, then all key values will be lower case. -// 5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. 
-func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) { - var r bool - if len(cast) == 1 { - r = cast[0] - } - // create TeeReader so we can retrieve raw XML - buf := make([]byte, 0) - wb := bytes.NewBuffer(buf) - trdr := myTeeReader(xmlReader, wb) // see code at EOF - - m, err := xmlReaderToMap(trdr, r) - - // retrieve the raw XML that was decoded - b := wb.Bytes() - - if err != nil { - return nil, b, err - } - - return m, b, nil -} - -// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value -func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) { - // parse the Reader - p := xml.NewDecoder(rdr) - if CustomDecoder != nil { - useCustomDecoder(p) - } else { - p.CharsetReader = XmlCharsetReader - } - return xmlToMapParser("", nil, p, r) -} - -// xmlToMap - convert a XML doc into map[string]interface{} value -func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) { - b := bytes.NewReader(doc) - p := xml.NewDecoder(b) - if CustomDecoder != nil { - useCustomDecoder(p) - } else { - p.CharsetReader = XmlCharsetReader - } - return xmlToMapParser("", nil, p, r) -} - -// ===================================== where the work happens ============================= - -// PrependAttrWithHyphen. Prepend attribute tags with a hyphen. -// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.) -// Note: -// If 'false', unmarshaling and marshaling is not symmetric. Attributes will be -// marshal'd as attr and may be part of a list. -func PrependAttrWithHyphen(v bool) { - if v { - attrPrefix = "-" - lenAttrPrefix = len(attrPrefix) - return - } - attrPrefix = "" - lenAttrPrefix = len(attrPrefix) -} - -// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com. -var includeTagSeqNum bool - -// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting -// its position when parsed. This is of limited usefulness, since list values cannot -// be tagged with "_seq" without changing their depth in the Map. -// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what -// you get. -/* - - - - - hello - - - parses as: - - { - Obj:{ - "-c":"la", - "-h":"da", - "-x":"dee", - "intObj":[ - { - "-id"="3", - "_seq":"0" // if mxj.Cast is passed, then: "_seq":0 - }, - { - "-id"="2", - "_seq":"2" - }], - "intObj1":{ - "-id":"1", - "_seq":"1" - }, - "StrObj":{ - "#text":"hello", // simple element value gets "#text" tag - "_seq":"3" - } - } - } -*/ -func IncludeTagSeqNum(b bool) { - includeTagSeqNum = b -} - -// all keys will be "lower case" -var lowerCase bool - -// Coerce all tag values to keys in lower case. This is useful if you've got sources with variable -// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec -// in lower case. -// CoerceKeysToLower() will toggle the coercion flag true|false - on|off -// CoerceKeysToLower(true|false) will set the coercion flag on|off -// -// NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as -// the associated HandleXmlReader and HandleXmlReaderRaw. -func CoerceKeysToLower(b ...bool) { - if len(b) == 0 { - lowerCase = !lowerCase - } else if len(b) == 1 { - lowerCase = b[0] - } -} - -// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels. -// We do this by replacing '`' constant with attrPrefix var, replacing useHyphen with attrPrefix = "", -// and adding a SetAttrPrefix(s string) function. 
- -var attrPrefix string = `-` // the default -var lenAttrPrefix int = 1 // the default - -// SetAttrPrefix changes the default, "-", to the specified value, s. -// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false). -// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.) -func SetAttrPrefix(s string) { - attrPrefix = s - lenAttrPrefix = len(attrPrefix) -} - -// 18jan17: Allows user to specify if the map keys should be in snake case instead -// of the default hyphenated notation. -var snakeCaseKeys bool - -// CoerceKeysToSnakeCase changes the default, false, to the specified value, b. -// Note: the attribute prefix will be a hyphen, '-', or what ever string value has -// been specified using SetAttrPrefix. -func CoerceKeysToSnakeCase(b ...bool) { - if len(b) == 0 { - snakeCaseKeys = !snakeCaseKeys - } else if len(b) == 1 { - snakeCaseKeys = b[0] - } -} - -// 05feb17: support processing XMPP streams (issue #36) -var handleXMPPStreamTag bool - -// HandleXMPPStreamTag causes decoder to parse XMPP elements. -// If called with no argument, XMPP stream element handling is toggled on/off. -// (See xmppStream_test.go for example.) -// If called with NewMapXml, NewMapXmlReader, New MapXmlReaderRaw the "stream" -// element will be returned as: -// map["stream"]interface{}{map[-]interface{}}. -// If called with NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw the "stream" -// element will be returned as: -// map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}} -// where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.) -func HandleXMPPStreamTag(b ...bool) { - if len(b) == 0 { - handleXMPPStreamTag = !handleXMPPStreamTag - } else if len(b) == 1 { - handleXMPPStreamTag = b[0] - } -} - -// 21jan18 - decode all values as map["#text":value] (issue #56) -var decodeSimpleValuesAsMap bool - -// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":]. -// If called with no argument, the decoding is toggled on/off. -// -// By default the NewMapXml functions decode simple values without attributes as -// map[:]. This function causes simple values without attributes to be -// decoded the same as simple values with attributes - map[:map["#text":]]. -func DecodeSimpleValuesAsMap(b ...bool) { - if len(b) == 0 { - decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap - } else if len(b) == 1 { - decodeSimpleValuesAsMap = b[0] - } -} - -// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly. -// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one. -// We've removed the intermediate *node tree with the allocation and subsequent rescanning. -func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { - if lowerCase { - skey = strings.ToLower(skey) - } - if snakeCaseKeys { - skey = strings.Replace(skey, "-", "_", -1) - } - - // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. - // Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value. - var n, na map[string]interface{} - var seq int // for includeTagSeqNum - - // Allocate maps and load attributes, if any. - // NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through - // to get StartElement then recurse with skey==xml.StartElement.Name.Local - // where we begin allocating map[string]interface{} values 'n' and 'na'. 
- if skey != "" { - n = make(map[string]interface{}) // old n - na = make(map[string]interface{}) // old n.nodes - if len(a) > 0 { - for _, v := range a { - if snakeCaseKeys { - v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1) - } - var key string - key = attrPrefix + v.Name.Local - if lowerCase { - key = strings.ToLower(key) - } - na[key] = cast(v.Value, r) - } - } - } - // Return XMPP message. - if handleXMPPStreamTag && skey == "stream" { - n[skey] = na - return n, nil - } - - for { - t, err := p.Token() - if err != nil { - if err != io.EOF { - return nil, errors.New("xml.Decoder.Token() - " + err.Error()) - } - return nil, err - } - switch t.(type) { - case xml.StartElement: - tt := t.(xml.StartElement) - - // First call to xmlToMapParser() doesn't pass xml.StartElement - the map key. - // So when the loop is first entered, the first token is the root tag along - // with any attributes, which we process here. - // - // Subsequent calls to xmlToMapParser() will pass in tag+attributes for - // processing before getting the next token which is the element value, - // which is done above. - if skey == "" { - return xmlToMapParser(tt.Name.Local, tt.Attr, p, r) - } - - // If not initializing the map, parse the element. - // len(nn) == 1, necessarily - it is just an 'n'. - nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r) - if err != nil { - return nil, err - } - - // The nn map[string]interface{} value is a na[nn_key] value. - // We need to see if nn_key already exists - means we're parsing a list. - // This may require converting na[nn_key] value into []interface{} type. - // First, extract the key:val for the map - it's a singleton. - // Note: - // * if CoerceKeysToLower() called, then key will be lower case. - // * if CoerceKeysToSnakeCase() called, then key will be converted to snake case. - var key string - var val interface{} - for key, val = range nn { - break - } - - // IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element. - // In theory, we don't need this if len(na) == 1. But, we don't know what might - // come next - we're only parsing forward. So if you ask for 'includeTagSeqNum' you - // get it on every element. (Personally, I never liked this, but I added it on request - // and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!) - if includeTagSeqNum { - switch val.(type) { - case []interface{}: - // noop - There's no clean way to handle this w/o changing message structure. - case map[string]interface{}: - val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag - seq++ - case interface{}: // a non-nil simple element: string, float64, bool - v := map[string]interface{}{"#text": val} - v["_seq"] = seq - seq++ - val = v - } - } - - // 'na' holding sub-elements of n. - // See if 'key' already exists. - // If 'key' exists, then this is a list, if not just add key:val to na. - if v, ok := na[key]; ok { - var a []interface{} - switch v.(type) { - case []interface{}: - a = v.([]interface{}) - default: // anything else - note: v.(type) != nil - a = []interface{}{v} - } - a = append(a, val) - na[key] = a - } else { - na[key] = val // save it as a singleton - } - case xml.EndElement: - // len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case. - if len(n) == 0 { - // If len(na)==0 we have an empty element == ""; - // it has no xml.Attr nor xml.CharData. 
- // Note: in original node-tree parser, val defaulted to ""; - // so we always had the default if len(node.nodes) == 0. - if len(na) > 0 { - n[skey] = na - } else { - n[skey] = "" // empty element - } - } - return n, nil - case xml.CharData: - // clean up possible noise - tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ") - if len(tt) > 0 { - if len(na) > 0 || decodeSimpleValuesAsMap { - na["#text"] = cast(tt, r) - } else if skey != "" { - n[skey] = cast(tt, r) - } else { - // per Adrian (http://www.adrianlungu.com/) catch stray text - // in decoder stream - - // https://github.com/clbanning/mxj/pull/14#issuecomment-182816374 - // NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get - // a p.Token() decoding error when the BOM is UTF-16 or UTF-32. - continue - } - } - default: - // noop - } - } -} - -var castNanInf bool - -// Cast "Nan", "Inf", "-Inf" XML values to 'float64'. -// By default, these values will be decoded as 'string'. -func CastNanInf(b bool) { - castNanInf = b -} - -// cast - try to cast string values to bool or float64 -func cast(s string, r bool) interface{} { - if r { - // handle nan and inf - if !castNanInf { - switch strings.ToLower(s) { - case "nan", "inf", "-inf": - return s - } - } - - // handle numeric strings ahead of boolean - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - // ParseBool treats "1"==true & "0"==false, we've already scanned those - // values as float64. See if value has 't' or 'f' as initial screen to - // minimize calls to ParseBool; also, see if len(s) < 6. - if len(s) > 0 && len(s) < 6 { - switch s[:1] { - case "t", "T", "f", "F": - if b, err := strconv.ParseBool(s); err == nil { - return b - } - } - } - } - return s -} - -// ------------------ END: NewMapXml & NewMapXmlReader ------------------------- - -// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------ - -const ( - DefaultRootTag = "doc" -) - -var useGoXmlEmptyElemSyntax bool - -// XmlGoEmptyElemSyntax() - rather than . -// Go's encoding/xml package marshals empty XML elements as . By default this package -// encodes empty elements as . If you're marshaling Map values that include structures -// (which are passed to xml.Marshal for encoding), this will let you conform to the standard package. -func XmlGoEmptyElemSyntax() { - useGoXmlEmptyElemSyntax = true -} - -// XmlDefaultEmptyElemSyntax() - rather than . -// Return XML encoding for empty elements to the default package setting. -// Reverses effect of XmlGoEmptyElemSyntax(). -func XmlDefaultEmptyElemSyntax() { - useGoXmlEmptyElemSyntax = false -} - -// Encode a Map as XML. The companion of NewMapXml(). -// The following rules apply. -// - The key label "#text" is treated as the value for a simple element with attributes. -// - Map keys that begin with a hyphen, '-', are interpreted as attributes. -// It is an error if the attribute doesn't have a []byte, string, number, or boolean value. -// - Map value type encoding: -// > string, bool, float64, int, int32, int64, float32: per "%v" formating -// > []bool, []uint8: by casting to string -// > structures, etc.: handed to xml.Marshal() - if there is an error, the element -// value is "UNKNOWN" -// - Elements with only attribute values or are null are terminated using "/>". -// - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible. -// Thus, `{ "key":"value" }` encodes as "value". 
-// - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax(). -// The attributes tag=value pairs are alphabetized by "tag". Also, when encoding map[string]interface{} values - -// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted. -func (mv Map) Xml(rootTag ...string) ([]byte, error) { - m := map[string]interface{}(mv) - var err error - s := new(string) - p := new(pretty) // just a stub - - if len(m) == 1 && len(rootTag) == 0 { - for key, value := range m { - // if it an array, see if all values are map[string]interface{} - // we force a new root tag if we'll end up with no key:value in the list - // so: key:[string_val, bool:true] --> string_valtrue - switch value.(type) { - case []interface{}: - for _, v := range value.([]interface{}) { - switch v.(type) { - case map[string]interface{}: // noop - default: // anything else - err = mapToXmlIndent(false, s, DefaultRootTag, m, p) - goto done - } - } - } - err = mapToXmlIndent(false, s, key, value, p) - } - } else if len(rootTag) == 1 { - err = mapToXmlIndent(false, s, rootTag[0], m, p) - } else { - err = mapToXmlIndent(false, s, DefaultRootTag, m, p) - } -done: - return []byte(*s), err -} - -// The following implementation is provided only for symmetry with NewMapXmlReader[Raw] -// The names will also provide a key for the number of return arguments. - -// Writes the Map as XML on the Writer. -// See Xml() for encoding rules. -func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error { - x, err := mv.Xml(rootTag...) - if err != nil { - return err - } - - _, err = xmlWriter.Write(x) - return err -} - -// Writes the Map as XML on the Writer. []byte is the raw XML that was written. -// See Xml() for encoding rules. -func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) { - x, err := mv.Xml(rootTag...) - if err != nil { - return x, err - } - - _, err = xmlWriter.Write(x) - return x, err -} - -// Writes the Map as pretty XML on the Writer. -// See Xml() for encoding rules. -func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error { - x, err := mv.XmlIndent(prefix, indent, rootTag...) - if err != nil { - return err - } - - _, err = xmlWriter.Write(x) - return err -} - -// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written. -// See Xml() for encoding rules. -func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) { - x, err := mv.XmlIndent(prefix, indent, rootTag...) - if err != nil { - return x, err - } - - _, err = xmlWriter.Write(x) - return x, err -} - -// -------------------- END: mv.Xml & mv.XmlWriter ------------------------------- - -// -------------- Handle XML stream by processing Map value -------------------- - -// Default poll delay to keep Handler from spinning on an open stream -// like sitting on os.Stdin waiting for imput. -var xhandlerPollInterval = time.Millisecond - -// Bulk process XML using handlers that process a Map value. -// 'rdr' is an io.Reader for XML (stream) -// 'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing. -// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error. -// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. 
-// This means that you can stop reading the file on error or after processing a particular message. -// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. -func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { - var n int - for { - m, merr := NewMapXmlReader(xmlReader) - n++ - - // handle error condition with errhandler - if merr != nil && merr != io.EOF { - merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) - if ok := errHandler(merr); !ok { - // caused reader termination - return merr - } - continue - } - - // pass to maphandler - if len(m) != 0 { - if ok := mapHandler(m); !ok { - break - } - } else if merr != io.EOF { - time.Sleep(xhandlerPollInterval) - } - - if merr == io.EOF { - break - } - } - return nil -} - -// Bulk process XML using handlers that process a Map value and the raw XML. -// 'rdr' is an io.Reader for XML (stream) -// 'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing. -// 'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error. -// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. -// This means that you can stop reading the file on error or after processing a particular message. -// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. -// See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader. -func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { - var n int - for { - m, raw, merr := NewMapXmlReaderRaw(xmlReader) - n++ - - // handle error condition with errhandler - if merr != nil && merr != io.EOF { - merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) - if ok := errHandler(merr, raw); !ok { - // caused reader termination - return merr - } - continue - } - - // pass to maphandler - if len(m) != 0 { - if ok := mapHandler(m, raw); !ok { - break - } - } else if merr != io.EOF { - time.Sleep(xhandlerPollInterval) - } - - if merr == io.EOF { - break - } - } - return nil -} - -// ----------------- END: Handle XML stream by processing Map value -------------- - -// -------- a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ---------- - -// This is a clone of io.TeeReader with the additional method t.ReadByte(). -// Thus, this TeeReader is also an io.ByteReader. -// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written -// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte().. -// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with -// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic. - -type teeReader struct { - r io.Reader - w io.Writer - b []byte -} - -func myTeeReader(r io.Reader, w io.Writer) io.Reader { - b := make([]byte, 1) - return &teeReader{r, w, b} -} - -// need for io.Reader - but we don't use it ... 
-func (t *teeReader) Read(p []byte) (int, error) { - return 0, nil -} - -func (t *teeReader) ReadByte() (byte, error) { - n, err := t.r.Read(t.b) - if n > 0 { - if _, err := t.w.Write(t.b[:1]); err != nil { - return t.b[0], err - } - } - return t.b[0], err -} - -// For use with NewMapXmlReader & NewMapXmlSeqReader. -type byteReader struct { - r io.Reader - b []byte -} - -func myByteReader(r io.Reader) io.Reader { - b := make([]byte, 1) - return &byteReader{r, b} -} - -// Need for io.Reader interface ... -// Needed if reading a malformed http.Request.Body - issue #38. -func (b *byteReader) Read(p []byte) (int, error) { - return b.r.Read(p) -} - -func (b *byteReader) ReadByte() (byte, error) { - _, err := b.r.Read(b.b) - if len(b.b) > 0 { - return b.b[0], err - } - var c byte - return c, err -} - -// ----------------------- END: io.TeeReader hack ----------------------------------- - -// ---------------------- XmlIndent - from j2x package ---------------------------- - -// Encode a map[string]interface{} as a pretty XML string. -// See Xml for encoding rules. -func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) { - m := map[string]interface{}(mv) - - var err error - s := new(string) - p := new(pretty) - p.indent = indent - p.padding = prefix - - if len(m) == 1 && len(rootTag) == 0 { - // this can extract the key for the single map element - // use it if it isn't a key for a list - for key, value := range m { - if _, ok := value.([]interface{}); ok { - err = mapToXmlIndent(true, s, DefaultRootTag, m, p) - } else { - err = mapToXmlIndent(true, s, key, value, p) - } - } - } else if len(rootTag) == 1 { - err = mapToXmlIndent(true, s, rootTag[0], m, p) - } else { - err = mapToXmlIndent(true, s, DefaultRootTag, m, p) - } - return []byte(*s), err -} - -type pretty struct { - indent string - cnt int - padding string - mapDepth int - start int -} - -func (p *pretty) Indent() { - p.padding += p.indent - p.cnt++ -} - -func (p *pretty) Outdent() { - if p.cnt > 0 { - p.padding = p.padding[:len(p.padding)-len(p.indent)] - p.cnt-- - } -} - -// where the work actually happens -// returns an error if an attribute is not atomic -func mapToXmlIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error { - var endTag bool - var isSimple bool - var elen int - p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start} - - // per issue #48, 18apr18 - try and coerce maps to map[string]interface{} - // Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq(). 
- if reflect.ValueOf(value).Kind() == reflect.Map { - switch value.(type) { - case map[string]interface{}: - default: - val := make(map[string]interface{}) - vv := reflect.ValueOf(value) - keys := vv.MapKeys() - for _, k := range keys { - val[fmt.Sprint(k)] = vv.MapIndex(k).Interface() - } - value = val - } - } - - switch value.(type) { - // special handling of []interface{} values when len(value) == 0 - case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number: - if doIndent { - *s += p.padding - } - *s += `<` + key - } - switch value.(type) { - case map[string]interface{}: - vv := value.(map[string]interface{}) - lenvv := len(vv) - // scan out attributes - attribute keys have prepended attrPrefix - attrlist := make([][2]string, len(vv)) - var n int - var ss string - for k, v := range vv { - if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix { - switch v.(type) { - case string: - if xmlEscapeChars { - ss = escapeChars(v.(string)) - } else { - ss = v.(string) - } - attrlist[n][0] = k[lenAttrPrefix:] - attrlist[n][1] = ss - case float64, bool, int, int32, int64, float32, json.Number: - attrlist[n][0] = k[lenAttrPrefix:] - attrlist[n][1] = fmt.Sprintf("%v", v) - case []byte: - if xmlEscapeChars { - ss = escapeChars(string(v.([]byte))) - } else { - ss = string(v.([]byte)) - } - attrlist[n][0] = k[lenAttrPrefix:] - attrlist[n][1] = ss - default: - return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v) - } - n++ - } - } - if n > 0 { - attrlist = attrlist[:n] - sort.Sort(attrList(attrlist)) - for _, v := range attrlist { - *s += ` ` + v[0] + `="` + v[1] + `"` - } - } - // only attributes? - if n == lenvv { - if useGoXmlEmptyElemSyntax { - *s += `" - } else { - *s += `/>` - } - break - } - - // simple element? Note: '#text" is an invalid XML tag. - if v, ok := vv["#text"]; ok && n+1 == lenvv { - switch v.(type) { - case string: - if xmlEscapeChars { - v = escapeChars(v.(string)) - } else { - v = v.(string) - } - case []byte: - if xmlEscapeChars { - v = escapeChars(string(v.([]byte))) - } - } - *s += ">" + fmt.Sprintf("%v", v) - endTag = true - elen = 1 - isSimple = true - break - } else if ok { - // Handle edge case where simple element with attributes - // is unmarshal'd using NewMapXml() where attribute prefix - // has been set to "". - // TODO(clb): should probably scan all keys for invalid chars. - return fmt.Errorf("invalid attribute key label: #text - due to attributes not being prefixed") - } - - // close tag with possible attributes - *s += ">" - if doIndent { - *s += "\n" - } - // something more complex - p.mapDepth++ - // extract the map k:v pairs and sort on key - elemlist := make([][2]interface{}, len(vv)) - n = 0 - for k, v := range vv { - if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix { - continue - } - elemlist[n][0] = k - elemlist[n][1] = v - n++ - } - elemlist = elemlist[:n] - sort.Sort(elemList(elemlist)) - var i int - for _, v := range elemlist { - switch v[1].(type) { - case []interface{}: - default: - if i == 0 && doIndent { - p.Indent() - } - } - i++ - if err := mapToXmlIndent(doIndent, s, v[0].(string), v[1], p); err != nil { - return err - } - switch v[1].(type) { - case []interface{}: // handled in []interface{} case - default: - if doIndent { - p.Outdent() - } - } - i-- - } - p.mapDepth-- - endTag = true - elen = 1 // we do have some content ... 
- case []interface{}: - // special case - found during implementing Issue #23 - if len(value.([]interface{})) == 0 { - if doIndent { - *s += p.padding + p.indent - } - *s += "<" + key - elen = 0 - endTag = true - break - } - for _, v := range value.([]interface{}) { - if doIndent { - p.Indent() - } - if err := mapToXmlIndent(doIndent, s, key, v, p); err != nil { - return err - } - if doIndent { - p.Outdent() - } - } - return nil - case []string: - // This was added by https://github.com/slotix ... not a type that - // would be encountered if mv generated from NewMapXml, NewMapJson. - // Could be encountered in AnyXml(), so we'll let it stay, though - // it should be merged with case []interface{}, above. - //quick fix for []string type - //[]string should be treated exaclty as []interface{} - if len(value.([]string)) == 0 { - if doIndent { - *s += p.padding + p.indent - } - *s += "<" + key - elen = 0 - endTag = true - break - } - for _, v := range value.([]string) { - if doIndent { - p.Indent() - } - if err := mapToXmlIndent(doIndent, s, key, v, p); err != nil { - return err - } - if doIndent { - p.Outdent() - } - } - return nil - case nil: - // terminate the tag - if doIndent { - *s += p.padding - } - *s += "<" + key - endTag, isSimple = true, true - break - default: // handle anything - even goofy stuff - elen = 0 - switch value.(type) { - case string: - v := value.(string) - if xmlEscapeChars { - v = escapeChars(v) - } - elen = len(v) - if elen > 0 { - *s += ">" + v - } - case float64, bool, int, int32, int64, float32, json.Number: - v := fmt.Sprintf("%v", value) - elen = len(v) // always > 0 - *s += ">" + v - case []byte: // NOTE: byte is just an alias for uint8 - // similar to how xml.Marshal handles []byte structure members - v := string(value.([]byte)) - if xmlEscapeChars { - v = escapeChars(v) - } - elen = len(v) - if elen > 0 { - *s += ">" + v - } - default: - var v []byte - var err error - if doIndent { - v, err = xml.MarshalIndent(value, p.padding, p.indent) - } else { - v, err = xml.Marshal(value) - } - if err != nil { - *s += ">UNKNOWN" - } else { - elen = len(v) - if elen > 0 { - *s += string(v) - } - } - } - isSimple = true - endTag = true - } - if endTag { - if doIndent { - if !isSimple { - *s += p.padding - } - } - if elen > 0 || useGoXmlEmptyElemSyntax { - if elen == 0 { - *s += ">" - } - *s += `" - } else { - *s += `/>` - } - } - if doIndent { - if p.cnt > p.start { - *s += "\n" - } - p.Outdent() - } - - return nil -} - -// ============================ sort interface implementation ================= - -type attrList [][2]string - -func (a attrList) Len() int { - return len(a) -} - -func (a attrList) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a attrList) Less(i, j int) bool { - return a[i][0] <= a[j][0] -} - -type elemList [][2]interface{} - -func (e elemList) Len() int { - return len(e) -} - -func (e elemList) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e elemList) Less(i, j int) bool { - return e[i][0].(string) <= e[j][0].(string) -} diff --git a/vendor/github.com/clbanning/mxj/xmlseq.go b/vendor/github.com/clbanning/mxj/xmlseq.go deleted file mode 100644 index 6be73ae6..00000000 --- a/vendor/github.com/clbanning/mxj/xmlseq.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright 2012-2016 Charles Banning. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file - -// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding. 
-// Also, handles comments, directives and process instructions. - -package mxj - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "sort" - "strings" -) - -var NoRoot = errors.New("no root key") -var NO_ROOT = NoRoot // maintain backwards compatibility - -// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... ------------------------- - -// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure. -// The xml.Decoder.RawToken method is used to parse the XML, so there is no checking for appropriate xml.EndElement values; -// thus, it is assumed that the XML is valid. -// -// NewMapXmlSeq - convert a XML doc into a Map with elements id'd with decoding sequence int - #seq. -// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible. -// NOTE: "#seq" key/value pairs are removed on encoding with mv.XmlSeq() / mv.XmlSeqIndent(). -// • attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":, "#seq":} -// • all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well. -// • lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that -// include a "#seq" k:v pair based on sequence they are decoded. Thus, XML like: -// -// value 1 -// value 2 -// value 3 -// -// is decoded as: -// doc : -// ltag :[[]interface{}] -// [item: 0] -// #seq :[int] 0 -// #text :[string] value 1 -// [item: 1] -// #seq :[int] 2 -// #text :[string] value 3 -// newtag : -// #seq :[int] 1 -// #text :[string] value 2 -// It will encode in proper sequence even though the Map representation merges all "ltag" elements in an array. -// • comments - "" - are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair. -// • directives - "" - are decoded as map["#directive"]map[#text"]"directive_text" with a "#seq" k:v pair. -// • process instructions - "" - are decoded as map["#procinst"]interface{} where the #procinst value -// is of map[string]interface{} type with the following keys: #target, #inst, and #seq. -// • comments, directives, and procinsts that are NOT part of a document with a root key will be returned as -// map[string]interface{} and the error value 'NoRoot'. -// • note: ": tag preserve the -// ":" notation rather than stripping it as with NewMapXml(). -// 2. Attribute keys for name space prefix declarations preserve "xmlns:" notation. -func NewMapXmlSeq(xmlVal []byte, cast ...bool) (Map, error) { - var r bool - if len(cast) == 1 { - r = cast[0] - } - return xmlSeqToMap(xmlVal, r) -} - -// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure. -// -// Get next XML doc from an io.Reader as a Map value. Returns Map value. -// NOTES: -// 1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other -// extraneous xml.CharData will be ignored unless io.EOF is reached first. -// 2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to -// re-encode the message in its original structure. -// 3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. 
-func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (Map, error) { - var r bool - if len(cast) == 1 { - r = cast[0] - } - - // We need to put an *os.File reader in a ByteReader or the xml.NewDecoder - // will wrap it in a bufio.Reader and seek on the file beyond where the - // xml.Decoder parses! - if _, ok := xmlReader.(io.ByteReader); !ok { - xmlReader = myByteReader(xmlReader) // see code at EOF - } - - // build the map - return xmlSeqReaderToMap(xmlReader, r) -} - -// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure. -// -// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML. -// NOTES: -// 1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte -// using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact. -// See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large -// data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body -// you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call. -// 2. The 'raw' return value may be larger than the XML text value. -// 3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other -// extraneous xml.CharData will be ignored unless io.EOF is reached first. -// 4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to -// re-encode the message in its original structure. -// 5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. -func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) { - var r bool - if len(cast) == 1 { - r = cast[0] - } - // create TeeReader so we can retrieve raw XML - buf := make([]byte, 0) - wb := bytes.NewBuffer(buf) - trdr := myTeeReader(xmlReader, wb) - - m, err := xmlSeqReaderToMap(trdr, r) - - // retrieve the raw XML that was decoded - b := wb.Bytes() - - // err may be NoRoot - return m, b, err -} - -// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value -func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) { - // parse the Reader - p := xml.NewDecoder(rdr) - if CustomDecoder != nil { - useCustomDecoder(p) - } else { - p.CharsetReader = XmlCharsetReader - } - return xmlSeqToMapParser("", nil, p, r) -} - -// xmlSeqToMap - convert a XML doc into map[string]interface{} value -func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) { - b := bytes.NewReader(doc) - p := xml.NewDecoder(b) - if CustomDecoder != nil { - useCustomDecoder(p) - } else { - p.CharsetReader = XmlCharsetReader - } - return xmlSeqToMapParser("", nil, p, r) -} - -// ===================================== where the work happens ============================= - -// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly. -// Add #seq tag value for each element decoded - to be used for Encoding later. -func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { - if snakeCaseKeys { - skey = strings.Replace(skey, "-", "_", -1) - } - - // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. 
- var n, na map[string]interface{} - var seq int // for including seq num when decoding - - // Allocate maps and load attributes, if any. - // NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through - // to get StartElement then recurse with skey==xml.StartElement.Name.Local - // where we begin allocating map[string]interface{} values 'n' and 'na'. - if skey != "" { - // 'n' only needs one slot - save call to runtime•hashGrow() - // 'na' we don't know - n = make(map[string]interface{}, 1) - na = make(map[string]interface{}) - if len(a) > 0 { - // xml.Attr is decoded into: map["#attr"]map[]interface{} - // where interface{} is map[string]interface{}{"#text":, "#seq":} - aa := make(map[string]interface{}, len(a)) - for i, v := range a { - if snakeCaseKeys { - v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1) - } - if len(v.Name.Space) > 0 { - aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r), "#seq": i} - } else { - aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r), "#seq": i} - } - } - na["#attr"] = aa - } - } - - // Return XMPP message. - if handleXMPPStreamTag && skey == "stream:stream" { - n[skey] = na - return n, nil - } - - for { - t, err := p.RawToken() - if err != nil { - if err != io.EOF { - return nil, errors.New("xml.Decoder.Token() - " + err.Error()) - } - return nil, err - } - switch t.(type) { - case xml.StartElement: - tt := t.(xml.StartElement) - - // First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key. - // So when the loop is first entered, the first token is the root tag along - // with any attributes, which we process here. - // - // Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for - // processing before getting the next token which is the element value, - // which is done above. - if skey == "" { - if len(tt.Name.Space) > 0 { - return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r) - } else { - return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r) - } - } - - // If not initializing the map, parse the element. - // len(nn) == 1, necessarily - it is just an 'n'. - var nn map[string]interface{} - if len(tt.Name.Space) > 0 { - nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r) - } else { - nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r) - } - if err != nil { - return nil, err - } - - // The nn map[string]interface{} value is a na[nn_key] value. - // We need to see if nn_key already exists - means we're parsing a list. - // This may require converting na[nn_key] value into []interface{} type. - // First, extract the key:val for the map - it's a singleton. - var key string - var val interface{} - for key, val = range nn { - break - } - - // add "#seq" k:v pair - - // Sequence number included even in list elements - this should allow us - // to properly resequence even something goofy like: - // item 1 - // item 2 - // item 3 - // where all the "list" subelements are decoded into an array. - switch val.(type) { - case map[string]interface{}: - val.(map[string]interface{})["#seq"] = seq - seq++ - case interface{}: // a non-nil simple element: string, float64, bool - v := map[string]interface{}{"#text": val, "#seq": seq} - seq++ - val = v - } - - // 'na' holding sub-elements of n. - // See if 'key' already exists. - // If 'key' exists, then this is a list, if not just add key:val to na. 
- if v, ok := na[key]; ok { - var a []interface{} - switch v.(type) { - case []interface{}: - a = v.([]interface{}) - default: // anything else - note: v.(type) != nil - a = []interface{}{v} - } - a = append(a, val) - na[key] = a - } else { - na[key] = val // save it as a singleton - } - case xml.EndElement: - if skey != "" { - tt := t.(xml.EndElement) - if snakeCaseKeys { - tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1) - } - var name string - if len(tt.Name.Space) > 0 { - name = tt.Name.Space + `:` + tt.Name.Local - } else { - name = tt.Name.Local - } - if skey != name { - return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d", - skey, name, p.InputOffset()) - } - } - // len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case. - if len(n) == 0 { - // If len(na)==0 we have an empty element == ""; - // it has no xml.Attr nor xml.CharData. - // Empty element content will be map["etag"]map["#text"]"" - // after #seq injection - map["etag"]map["#seq"]seq - after return. - if len(na) > 0 { - n[skey] = na - } else { - n[skey] = "" // empty element - } - } - return n, nil - case xml.CharData: - // clean up possible noise - tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ") - if skey == "" { - // per Adrian (http://www.adrianlungu.com/) catch stray text - // in decoder stream - - // https://github.com/clbanning/mxj/pull/14#issuecomment-182816374 - // NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get - // a p.Token() decoding error when the BOM is UTF-16 or UTF-32. - continue - } - if len(tt) > 0 { - // every simple element is a #text and has #seq associated with it - na["#text"] = cast(tt, r) - na["#seq"] = seq - seq++ - } - case xml.Comment: - if n == nil { // no root 'key' - n = map[string]interface{}{"#comment": string(t.(xml.Comment))} - return n, NoRoot - } - cm := make(map[string]interface{}, 2) - cm["#text"] = string(t.(xml.Comment)) - cm["#seq"] = seq - seq++ - na["#comment"] = cm - case xml.Directive: - if n == nil { // no root 'key' - n = map[string]interface{}{"#directive": string(t.(xml.Directive))} - return n, NoRoot - } - dm := make(map[string]interface{}, 2) - dm["#text"] = string(t.(xml.Directive)) - dm["#seq"] = seq - seq++ - na["#directive"] = dm - case xml.ProcInst: - if n == nil { - na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)} - n = map[string]interface{}{"#procinst": na} - return n, NoRoot - } - pm := make(map[string]interface{}, 3) - pm["#target"] = t.(xml.ProcInst).Target - pm["#inst"] = string(t.(xml.ProcInst).Inst) - pm["#seq"] = seq - seq++ - na["#procinst"] = pm - default: - // noop - shouldn't ever get here, now, since we handle all token types - } - } -} - -// ------------------ END: NewMapXml & NewMapXmlReader ------------------------- - -// --------------------- mv.XmlSeq & mv.XmlSeqWriter ------------------------- - -// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co. -// -// Encode a Map as XML with elements sorted on #seq. The companion of NewMapXmlSeq(). -// The following rules apply. -// - The key label "#text" is treated as the value for a simple element with attributes. -// - The "#seq" key is used to seqence the subelements or attributes but is ignored for writing. -// - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key. -// - The "#comment" map key identifies a comment in the value "#text" map entry - . 
-// - The "#directive" map key identifies a directive in the value "#text" map entry - . -// - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst" -// map entries - . -// - Value type encoding: -// > string, bool, float64, int, int32, int64, float32: per "%v" formating -// > []bool, []uint8: by casting to string -// > structures, etc.: handed to xml.Marshal() - if there is an error, the element -// value is "UNKNOWN" -// - Elements with only attribute values or are null are terminated using "/>" unless XmlGoEmptyElemSystax() called. -// - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible. -// Thus, `{ "key":"value" }` encodes as "value". -func (mv Map) XmlSeq(rootTag ...string) ([]byte, error) { - m := map[string]interface{}(mv) - var err error - s := new(string) - p := new(pretty) // just a stub - - if len(m) == 1 && len(rootTag) == 0 { - for key, value := range m { - // if it's an array, see if all values are map[string]interface{} - // we force a new root tag if we'll end up with no key:value in the list - // so: key:[string_val, bool:true] --> string_valtrue - switch value.(type) { - case []interface{}: - for _, v := range value.([]interface{}) { - switch v.(type) { - case map[string]interface{}: // noop - default: // anything else - err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p) - goto done - } - } - } - err = mapToXmlSeqIndent(false, s, key, value, p) - } - } else if len(rootTag) == 1 { - err = mapToXmlSeqIndent(false, s, rootTag[0], m, p) - } else { - err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p) - } -done: - return []byte(*s), err -} - -// The following implementation is provided only for symmetry with NewMapXmlReader[Raw] -// The names will also provide a key for the number of return arguments. - -// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co. -// -// Writes the Map as XML on the Writer. -// See XmlSeq() for encoding rules. -func (mv Map) XmlSeqWriter(xmlWriter io.Writer, rootTag ...string) error { - x, err := mv.XmlSeq(rootTag...) - if err != nil { - return err - } - - _, err = xmlWriter.Write(x) - return err -} - -// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co. -// -// Writes the Map as XML on the Writer. []byte is the raw XML that was written. -// See XmlSeq() for encoding rules. -func (mv Map) XmlSeqWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) { - x, err := mv.XmlSeq(rootTag...) - if err != nil { - return x, err - } - - _, err = xmlWriter.Write(x) - return x, err -} - -// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co. -// -// Writes the Map as pretty XML on the Writer. -// See Xml() for encoding rules. -func (mv Map) XmlSeqIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error { - x, err := mv.XmlSeqIndent(prefix, indent, rootTag...) - if err != nil { - return err - } - - _, err = xmlWriter.Write(x) - return err -} - -// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co. -// -// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written. -// See XmlSeq() for encoding rules. -func (mv Map) XmlSeqIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) { - x, err := mv.XmlSeqIndent(prefix, indent, rootTag...) 
- if err != nil { - return x, err - } - - _, err = xmlWriter.Write(x) - return x, err -} - -// -------------------- END: mv.Xml & mv.XmlWriter ------------------------------- - -// ---------------------- XmlSeqIndent ---------------------------- - -// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co. -// -// Encode a map[string]interface{} as a pretty XML string. -// See mv.XmlSeq() for encoding rules. -func (mv Map) XmlSeqIndent(prefix, indent string, rootTag ...string) ([]byte, error) { - m := map[string]interface{}(mv) - - var err error - s := new(string) - p := new(pretty) - p.indent = indent - p.padding = prefix - - if len(m) == 1 && len(rootTag) == 0 { - // this can extract the key for the single map element - // use it if it isn't a key for a list - for key, value := range m { - if _, ok := value.([]interface{}); ok { - err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p) - } else { - err = mapToXmlSeqIndent(true, s, key, value, p) - } - } - } else if len(rootTag) == 1 { - err = mapToXmlSeqIndent(true, s, rootTag[0], m, p) - } else { - err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p) - } - return []byte(*s), err -} - -// where the work actually happens -// returns an error if an attribute is not atomic -func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error { - var endTag bool - var isSimple bool - var noEndTag bool - var elen int - var ss string - p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start} - - switch value.(type) { - case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32: - if doIndent { - *s += p.padding - } - if key != "#comment" && key != "#directive" && key != "#procinst" { - *s += `<` + key - } - } - switch value.(type) { - case map[string]interface{}: - val := value.(map[string]interface{}) - - if key == "#comment" { - *s += `` - noEndTag = true - break - } - - if key == "#directive" { - *s += `` - noEndTag = true - break - } - - if key == "#procinst" { - *s += `` - noEndTag = true - break - } - - haveAttrs := false - // process attributes first - if v, ok := val["#attr"].(map[string]interface{}); ok { - // First, unroll the map[string]interface{} into a []keyval array. - // Then sequence it. - kv := make([]keyval, len(v)) - n := 0 - for ak, av := range v { - kv[n] = keyval{ak, av} - n++ - } - sort.Sort(elemListSeq(kv)) - // Now encode the attributes in original decoding sequence, using keyval array. - for _, a := range kv { - vv := a.v.(map[string]interface{}) - switch vv["#text"].(type) { - case string: - if xmlEscapeChars { - ss = escapeChars(vv["#text"].(string)) - } else { - ss = vv["#text"].(string) - } - *s += ` ` + a.k + `="` + ss + `"` - case float64, bool, int, int32, int64, float32: - *s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"` - case []byte: - if xmlEscapeChars { - ss = escapeChars(string(vv["#text"].([]byte))) - } else { - ss = string(vv["#text"].([]byte)) - } - *s += ` ` + a.k + `="` + ss + `"` - default: - return fmt.Errorf("invalid attribute value for: %s", a.k) - } - } - haveAttrs = true - } - - // simple element? 
- // every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr" - _, seqOK := val["#seq"] // have key - if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK { - if stmp, ok := v.(string); ok && stmp != "" { - if xmlEscapeChars { - stmp = escapeChars(stmp) - } - *s += ">" + stmp - endTag = true - elen = 1 - } - isSimple = true - break - } else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK { - // here no #text but have #seq or #seq+#attr - endTag = false - break - } - - // we now need to sequence everything except attributes - // 'kv' will hold everything that needs to be written - kv := make([]keyval, 0) - for k, v := range val { - if k == "#attr" { // already processed - continue - } - if k == "#seq" { // ignore - just for sorting - continue - } - switch v.(type) { - case []interface{}: - // unwind the array as separate entries - for _, vv := range v.([]interface{}) { - kv = append(kv, keyval{k, vv}) - } - default: - kv = append(kv, keyval{k, v}) - } - } - - // close tag with possible attributes - *s += ">" - if doIndent { - *s += "\n" - } - // something more complex - p.mapDepth++ - sort.Sort(elemListSeq(kv)) - i := 0 - for _, v := range kv { - switch v.v.(type) { - case []interface{}: - default: - if i == 0 && doIndent { - p.Indent() - } - } - i++ - if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil { - return err - } - switch v.v.(type) { - case []interface{}: // handled in []interface{} case - default: - if doIndent { - p.Outdent() - } - } - i-- - } - p.mapDepth-- - endTag = true - elen = 1 // we do have some content other than attrs - case []interface{}: - for _, v := range value.([]interface{}) { - if doIndent { - p.Indent() - } - if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil { - return err - } - if doIndent { - p.Outdent() - } - } - return nil - case nil: - // terminate the tag - if doIndent { - *s += p.padding - } - *s += "<" + key - endTag, isSimple = true, true - break - default: // handle anything - even goofy stuff - elen = 0 - switch value.(type) { - case string: - if xmlEscapeChars { - ss = escapeChars(value.(string)) - } else { - ss = value.(string) - } - elen = len(ss) - if elen > 0 { - *s += ">" + ss - } - case float64, bool, int, int32, int64, float32: - v := fmt.Sprintf("%v", value) - elen = len(v) - if elen > 0 { - *s += ">" + v - } - case []byte: // NOTE: byte is just an alias for uint8 - // similar to how xml.Marshal handles []byte structure members - if xmlEscapeChars { - ss = escapeChars(string(value.([]byte))) - } else { - ss = string(value.([]byte)) - } - elen = len(ss) - if elen > 0 { - *s += ">" + ss - } - default: - var v []byte - var err error - if doIndent { - v, err = xml.MarshalIndent(value, p.padding, p.indent) - } else { - v, err = xml.Marshal(value) - } - if err != nil { - *s += ">UNKNOWN" - } else { - elen = len(v) - if elen > 0 { - *s += string(v) - } - } - } - isSimple = true - endTag = true - } - if endTag && !noEndTag { - if doIndent { - if !isSimple { - *s += p.padding - } - } - switch value.(type) { - case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32: - if elen > 0 || useGoXmlEmptyElemSyntax { - if elen == 0 { - *s += ">" - } - *s += `" - } else { - *s += `/>` - } - } - } else if !noEndTag { - if useGoXmlEmptyElemSyntax { - *s += `" - // *s += ">" - } else { - *s += "/>" - } - } - if doIndent { - if p.cnt > p.start { - *s += "\n" - } - p.Outdent() - } - - return nil -} - -// 
the element sort implementation - -type keyval struct { - k string - v interface{} -} -type elemListSeq []keyval - -func (e elemListSeq) Len() int { - return len(e) -} - -func (e elemListSeq) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e elemListSeq) Less(i, j int) bool { - var iseq, jseq int - var ok bool - if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok { - iseq = 9999999 - } - - if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok { - jseq = 9999999 - } - - return iseq <= jseq -} - -// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio - -// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent(). -func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) { - x, err := NewMapXmlSeq(b) - if err != nil { - return nil, err - } - return x.XmlSeqIndent(prefix, indent) -} diff --git a/vendor/github.com/franela/goreq/.gitignore b/vendor/github.com/franela/goreq/.gitignore deleted file mode 100644 index 131fc16f..00000000 --- a/vendor/github.com/franela/goreq/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -src diff --git a/vendor/github.com/franela/goreq/.travis.yml b/vendor/github.com/franela/goreq/.travis.yml deleted file mode 100644 index 916dd688..00000000 --- a/vendor/github.com/franela/goreq/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.9.2 - - tip -notifications: - email: - - ionathan@gmail.com - - marcosnils@gmail.com diff --git a/vendor/github.com/franela/goreq/LICENSE b/vendor/github.com/franela/goreq/LICENSE deleted file mode 100644 index 068dee1e..00000000 --- a/vendor/github.com/franela/goreq/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Jonathan Leibiusky and Marcos Lilljedahl - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/franela/goreq/Makefile b/vendor/github.com/franela/goreq/Makefile deleted file mode 100644 index 0f04d657..00000000 --- a/vendor/github.com/franela/goreq/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -test: - go get -v -d -t ./... 
- go test -v diff --git a/vendor/github.com/franela/goreq/README.md b/vendor/github.com/franela/goreq/README.md deleted file mode 100644 index c202f326..00000000 --- a/vendor/github.com/franela/goreq/README.md +++ /dev/null @@ -1,444 +0,0 @@ -[![Build Status](https://img.shields.io/travis/franela/goreq/master.svg)](https://travis-ci.org/franela/goreq) -[![GoDoc](https://godoc.org/github.com/franela/goreq?status.svg)](https://godoc.org/github.com/franela/goreq) - -GoReq -======= - -Simple and sane HTTP request library for Go language. - - - -**Table of Contents** - -- [Why GoReq?](#user-content-why-goreq) -- [How do I install it?](#user-content-how-do-i-install-it) -- [What can I do with it?](#user-content-what-can-i-do-with-it) - - [Making requests with different methods](#user-content-making-requests-with-different-methods) - - [GET](#user-content-get) - - [Tags](#user-content-tags) - - [POST](#user-content-post) - - [Sending payloads in the Body](#user-content-sending-payloads-in-the-body) - - [Specifiying request headers](#user-content-specifiying-request-headers) - - [Sending Cookies](#cookie-support) - - [Setting timeouts](#user-content-setting-timeouts) - - [Using the Response and Error](#user-content-using-the-response-and-error) - - [Receiving JSON](#user-content-receiving-json) - - [Sending/Receiving Compressed Payloads](#user-content-sendingreceiving-compressed-payloads) - - [Using gzip compression:](#user-content-using-gzip-compression) - - [Using deflate compression:](#user-content-using-deflate-compression) - - [Using compressed responses:](#user-content-using-compressed-responses) - - [Proxy](#proxy) - - [Debugging requests](#debug) - - [Getting raw Request & Response](#getting-raw-request--response) - - [TODO:](#user-content-todo) - - - -Why GoReq? -========== - -Go has very nice native libraries that allows you to do lots of cool things. But sometimes those libraries are too low level, which means that to do a simple thing, like an HTTP Request, it takes some time. And if you want to do something as simple as adding a timeout to a request, you will end up writing several lines of code. - -This is why we think GoReq is useful. Because you can do all your HTTP requests in a very simple and comprehensive way, while enabling you to do more advanced stuff by giving you access to the native API. - -How do I install it? -==================== - -```bash -go get github.com/franela/goreq -``` - -What can I do with it? -====================== - -## Making requests with different methods - -#### GET -```go -res, err := goreq.Request{ Uri: "http://www.google.com" }.Do() -``` - -GoReq default method is GET. 
- -You can also set value to GET method easily - -```go -type Item struct { - Limit int - Skip int - Fields string -} - -item := Item { - Limit: 3, - Skip: 5, - Fields: "Value", -} - -res, err := goreq.Request{ - Uri: "http://localhost:3000/", - QueryString: item, -}.Do() -``` -The sample above will send `http://localhost:3000/?limit=3&skip=5&fields=Value` - -Alternatively the `url` tag can be used in struct fields to customize encoding properties - -```go -type Item struct { - TheLimit int `url:"the_limit"` - TheSkip string `url:"the_skip,omitempty"` - TheFields string `url:"-"` -} - -item := Item { - TheLimit: 3, - TheSkip: "", - TheFields: "Value", -} - -res, err := goreq.Request{ - Uri: "http://localhost:3000/", - QueryString: item, -}.Do() -``` -The sample above will send `http://localhost:3000/?the_limit=3` - - -QueryString also support url.Values - -```go -item := url.Values{} -item.Set("Limit", 3) -item.Add("Field", "somefield") -item.Add("Field", "someotherfield") - -res, err := goreq.Request{ - Uri: "http://localhost:3000/", - QueryString: item, -}.Do() -``` - -The sample above will send `http://localhost:3000/?limit=3&field=somefield&field=someotherfield` - -### Tags - -Struct field `url` tag is mainly used as the request parameter name. -Tags can be comma separated multiple values, 1st value is for naming and rest has special meanings. - -- special tag for 1st value - - `-`: value is ignored if set this - -- special tag for rest 2nd value - - `omitempty`: zero-value is ignored if set this - - `squash`: the fields of embedded struct is used for parameter - -#### Tag Examples - -```go -type Place struct { - Country string `url:"country"` - City string `url:"city"` - ZipCode string `url:"zipcode,omitempty"` -} - -type Person struct { - Place `url:",squash"` - - FirstName string `url:"first_name"` - LastName string `url:"last_name"` - Age string `url:"age,omitempty"` - Password string `url:"-"` -} - -johnbull := Person{ - Place: Place{ // squash the embedded struct value - Country: "UK", - City: "London", - ZipCode: "SW1", - }, - FirstName: "John", - LastName: "Doe", - Age: "35", - Password: "my-secret", // ignored for parameter -} - -goreq.Request{ - Uri: "http://localhost/", - QueryString: johnbull, -}.Do() -// => `http://localhost/?first_name=John&last_name=Doe&age=35&country=UK&city=London&zip_code=SW1` - - -// age and zipcode will be ignored because of `omitempty` -// but firstname isn't. -samurai := Person{ - Place: Place{ // squash the embedded struct value - Country: "Japan", - City: "Tokyo", - }, - LastName: "Yagyu", -} - -goreq.Request{ - Uri: "http://localhost/", - QueryString: samurai, -}.Do() -// => `http://localhost/?first_name=&last_name=yagyu&country=Japan&city=Tokyo` -``` - - -#### POST - -```go -res, err := goreq.Request{ Method: "POST", Uri: "http://www.google.com" }.Do() -``` - -## Sending payloads in the Body - -You can send ```string```, ```Reader``` or ```interface{}``` in the body. The first two will be sent as text. The last one will be marshalled to JSON, if possible. - -```go -type Item struct { - Id int - Name string -} - -item := Item{ Id: 1111, Name: "foobar" } - -res, err := goreq.Request{ - Method: "POST", - Uri: "http://www.google.com", - Body: item, -}.Do() -``` - -## Specifiying request headers - -We think that most of the times the request headers that you use are: ```Host```, ```Content-Type```, ```Accept``` and ```User-Agent```. This is why we decided to make it very easy to set these headers. 
- -```go -res, err := goreq.Request{ - Uri: "http://www.google.com", - Host: "foobar.com", - Accept: "application/json", - ContentType: "application/json", - UserAgent: "goreq", -}.Do() -``` - -But sometimes you need to set other headers. You can still do it. - -```go -req := goreq.Request{ Uri: "http://www.google.com" } - -req.AddHeader("X-Custom", "somevalue") - -req.Do() -``` - -Alternatively you can use the `WithHeader` function to keep the syntax short - -```go -res, err = goreq.Request{ Uri: "http://www.google.com" }.WithHeader("X-Custom", "somevalue").Do() -``` - -## Cookie support - -Cookies can be either set at the request level by sending a [CookieJar](http://golang.org/pkg/net/http/cookiejar/) in the `CookieJar` request field -or you can use goreq's one-liner WithCookie method as shown below - -```go -res, err := goreq.Request{ - Uri: "http://www.google.com", -}. -WithCookie(&http.Cookie{Name: "c1", Value: "v1"}). -Do() -``` - -## Setting timeouts - -GoReq supports 2 kind of timeouts. A general connection timeout and a request specific one. By default the connection timeout is of 1 second. There is no default for request timeout, which means it will wait forever. - -You can change the connection timeout doing: - -```go -goreq.SetConnectTimeout(100 * time.Millisecond) -``` - -And specify the request timeout doing: - -```go -res, err := goreq.Request{ - Uri: "http://www.google.com", - Timeout: 500 * time.Millisecond, -}.Do() -``` - -## Using the Response and Error - -GoReq will always return 2 values: a ```Response``` and an ```Error```. -If ```Error``` is not ```nil``` it means that an error happened while doing the request and you shouldn't use the ```Response``` in any way. -You can check what happened by getting the error message: - -```go -fmt.Println(err.Error()) -``` -And to make it easy to know if it was a timeout error, you can ask the error or return it: - -```go -if serr, ok := err.(*goreq.Error); ok { - if serr.Timeout() { - ... - } -} -return err -``` - -If you don't get an error, you can safely use the ```Response```. - -```go -res.Uri // return final URL location of the response (fulfilled after redirect was made) -res.StatusCode // return the status code of the response -res.Body // gives you access to the body -res.Body.ToString() // will return the body as a string -res.Header.Get("Content-Type") // gives you access to all the response headers -``` -Remember that you should **always** close `res.Body` if it's not `nil` - -## Receiving JSON - -GoReq will help you to receive and unmarshal JSON. - -```go -type Item struct { - Id int - Name string -} - -var item Item - -res.Body.FromJsonTo(&item) -``` - -## Sending/Receiving Compressed Payloads -GoReq supports gzip, deflate and zlib compression of requests' body and transparent decompression of responses provided they have a correct `Content-Encoding` header. 
- -##### Using gzip compression: -```go -res, err := goreq.Request{ - Method: "POST", - Uri: "http://www.google.com", - Body: item, - Compression: goreq.Gzip(), -}.Do() -``` -##### Using deflate/zlib compression: -```go -res, err := goreq.Request{ - Method: "POST", - Uri: "http://www.google.com", - Body: item, - Compression: goreq.Deflate(), -}.Do() -``` -##### Using compressed responses: -If servers replies a correct and matching `Content-Encoding` header (gzip requires `Content-Encoding: gzip` and deflate `Content-Encoding: deflate`) goreq transparently decompresses the response so the previous example should always work: -```go -type Item struct { - Id int - Name string -} -res, err := goreq.Request{ - Method: "POST", - Uri: "http://www.google.com", - Body: item, - Compression: goreq.Gzip(), -}.Do() -var item Item -res.Body.FromJsonTo(&item) -``` -If no `Content-Encoding` header is replied by the server GoReq will return the crude response. - -## Proxy -If you need to use a proxy for your requests GoReq supports the standard `http_proxy` env variable as well as manually setting the proxy for each request - -```go -res, err := goreq.Request{ - Method: "GET", - Proxy: "http://myproxy:myproxyport", - Uri: "http://www.google.com", -}.Do() -``` - -### Proxy basic auth is also supported - -```go -res, err := goreq.Request{ - Method: "GET", - Proxy: "http://user:pass@myproxy:myproxyport", - Uri: "http://www.google.com", -}.Do() -``` - -## Debug -If you need to debug your http requests, it can print the http request detail. - -```go -res, err := goreq.Request{ - Method: "GET", - Uri: "http://www.google.com", - Compression: goreq.Gzip(), - ShowDebug: true, -}.Do() -fmt.Println(res, err) -``` - -and it will print the log: -``` -GET / HTTP/1.1 -Host: www.google.com -Accept: -Accept-Encoding: gzip -Content-Encoding: gzip -Content-Type: -``` - - -### Getting raw Request & Response - -To get the Request: - -```go -req := goreq.Request{ - Host: "foobar.com", -} - -//req.Request will return a new instance of an http.Request so you can safely use it for something else -request, _ := req.NewRequest() - -``` - - -To get the Response: - -```go -res, err := goreq.Request{ - Method: "GET", - Uri: "http://www.google.com", - Compression: goreq.Gzip(), - ShowDebug: true, -}.Do() - -// res.Response will contain the original http.Response structure -fmt.Println(res.Response, err) -``` - - - - -TODO: ------ - -We do have a couple of [issues](https://github.com/franela/goreq/issues) pending we'll be addressing soon. But feel free to -contribute and send us PRs (with tests please :smile:). 
diff --git a/vendor/github.com/franela/goreq/goreq.go b/vendor/github.com/franela/goreq/goreq.go deleted file mode 100644 index 801c7d30..00000000 --- a/vendor/github.com/franela/goreq/goreq.go +++ /dev/null @@ -1,515 +0,0 @@ -package goreq - -import ( - "bufio" - "bytes" - "compress/gzip" - "compress/zlib" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httputil" - "net/url" - "reflect" - "strings" - "time" -) - -type itimeout interface { - Timeout() bool -} -type Request struct { - headers []headerTuple - cookies []*http.Cookie - Method string - Uri string - Body interface{} - QueryString interface{} - Timeout time.Duration - ContentType string - Accept string - Host string - UserAgent string - Insecure bool - MaxRedirects int - RedirectHeaders bool - Proxy string - proxyConnectHeaders []headerTuple - Compression *compression - BasicAuthUsername string - BasicAuthPassword string - CookieJar http.CookieJar - ShowDebug bool - OnBeforeRequest func(goreq *Request, httpreq *http.Request) -} - -type compression struct { - writer func(buffer io.Writer) (io.WriteCloser, error) - reader func(buffer io.Reader) (io.ReadCloser, error) - ContentEncoding string -} - -type Response struct { - *http.Response - Uri string - Body *Body - req *http.Request -} - -func (r Response) CancelRequest() { - cancelRequest(DefaultTransport, r.req) - -} - -func cancelRequest(transport interface{}, r *http.Request) { - if tp, ok := transport.(transportRequestCanceler); ok { - tp.CancelRequest(r) - } -} - -type headerTuple struct { - name string - value string -} - -type Body struct { - reader io.ReadCloser - compressedReader io.ReadCloser -} - -type Error struct { - timeout bool - Err error -} - -type transportRequestCanceler interface { - CancelRequest(*http.Request) -} - -func (e *Error) Timeout() bool { - return e.timeout -} - -func (e *Error) Error() string { - return e.Err.Error() -} - -func (b *Body) Read(p []byte) (int, error) { - if b.compressedReader != nil { - return b.compressedReader.Read(p) - } - return b.reader.Read(p) -} - -func (b *Body) Close() error { - err := b.reader.Close() - if b.compressedReader != nil { - return b.compressedReader.Close() - } - return err -} - -func (b *Body) FromJsonTo(o interface{}) error { - return json.NewDecoder(b).Decode(o) -} - -func (b *Body) ToString() (string, error) { - body, err := ioutil.ReadAll(b) - if err != nil { - return "", err - } - return string(body), nil -} - -func Gzip() *compression { - reader := func(buffer io.Reader) (io.ReadCloser, error) { - return gzip.NewReader(buffer) - } - writer := func(buffer io.Writer) (io.WriteCloser, error) { - return gzip.NewWriter(buffer), nil - } - return &compression{writer: writer, reader: reader, ContentEncoding: "gzip"} -} - -func Deflate() *compression { - reader := func(buffer io.Reader) (io.ReadCloser, error) { - return zlib.NewReader(buffer) - } - writer := func(buffer io.Writer) (io.WriteCloser, error) { - return zlib.NewWriter(buffer), nil - } - return &compression{writer: writer, reader: reader, ContentEncoding: "deflate"} -} - -func Zlib() *compression { - return Deflate() -} - -func paramParse(query interface{}) (string, error) { - switch query.(type) { - case url.Values: - return query.(url.Values).Encode(), nil - case *url.Values: - return query.(*url.Values).Encode(), nil - default: - var v = &url.Values{} - err := paramParseStruct(v, query) - return v.Encode(), err - } -} - -func paramParseStruct(v *url.Values, query interface{}) error { - 
var ( - s = reflect.ValueOf(query) - t = reflect.TypeOf(query) - ) - for t.Kind() == reflect.Ptr || t.Kind() == reflect.Interface { - s = s.Elem() - t = s.Type() - } - - if t.Kind() != reflect.Struct { - return errors.New("Can not parse QueryString.") - } - - for i := 0; i < t.NumField(); i++ { - var name string - - field := s.Field(i) - typeField := t.Field(i) - - if !field.CanInterface() { - continue - } - - urlTag := typeField.Tag.Get("url") - if urlTag == "-" { - continue - } - - name, opts := parseTag(urlTag) - - var omitEmpty, squash bool - omitEmpty = opts.Contains("omitempty") - squash = opts.Contains("squash") - - if squash { - err := paramParseStruct(v, field.Interface()) - if err != nil { - return err - } - continue - } - - if urlTag == "" { - name = strings.ToLower(typeField.Name) - } - - if val := fmt.Sprintf("%v", field.Interface()); !(omitEmpty && len(val) == 0) { - v.Add(name, val) - } - } - return nil -} - -func prepareRequestBody(b interface{}) (io.Reader, error) { - switch b.(type) { - case string: - // treat is as text - return strings.NewReader(b.(string)), nil - case io.Reader: - // treat is as text - return b.(io.Reader), nil - case []byte: - //treat as byte array - return bytes.NewReader(b.([]byte)), nil - case nil: - return nil, nil - default: - // try to jsonify it - j, err := json.Marshal(b) - if err == nil { - return bytes.NewReader(j), nil - } - return nil, err - } -} - -var DefaultDialer = &net.Dialer{Timeout: 1000 * time.Millisecond} -var DefaultTransport http.RoundTripper = &http.Transport{Dial: DefaultDialer.Dial, Proxy: http.ProxyFromEnvironment} -var DefaultClient = &http.Client{Transport: DefaultTransport} - -var proxyTransport http.RoundTripper -var proxyClient *http.Client - -func SetConnectTimeout(duration time.Duration) { - DefaultDialer.Timeout = duration -} - -func (r *Request) AddHeader(name string, value string) { - if r.headers == nil { - r.headers = []headerTuple{} - } - r.headers = append(r.headers, headerTuple{name: name, value: value}) -} - -func (r Request) WithHeader(name string, value string) Request { - r.AddHeader(name, value) - return r -} - -func (r *Request) AddCookie(c *http.Cookie) { - r.cookies = append(r.cookies, c) -} - -func (r Request) WithCookie(c *http.Cookie) Request { - r.AddCookie(c) - return r -} - -func (r *Request) AddProxyConnectHeader(name string, value string) { - if r.proxyConnectHeaders == nil { - r.proxyConnectHeaders = []headerTuple{} - } - r.proxyConnectHeaders = append(r.proxyConnectHeaders, headerTuple{name: name, value: value}) -} - -func (r Request) WithProxyConnectHeader(name string, value string) Request { - r.AddProxyConnectHeader(name, value) - return r -} - -func (r Request) Do() (*Response, error) { - var client = DefaultClient - var transport = DefaultTransport - var resUri string - var redirectFailed bool - - r.Method = valueOrDefault(r.Method, "GET") - - // use a client with a cookie jar if necessary. We create a new client not - // to modify the default one. 
- if r.CookieJar != nil { - client = &http.Client{ - Transport: transport, - Jar: r.CookieJar, - } - } - - if r.Proxy != "" { - proxyUrl, err := url.Parse(r.Proxy) - if err != nil { - // proxy address is in a wrong format - return nil, &Error{Err: err} - } - - proxyHeader := make(http.Header) - if r.proxyConnectHeaders != nil { - for _, header := range r.proxyConnectHeaders { - proxyHeader.Add(header.name, header.value) - } - } - - //If jar is specified new client needs to be built - if proxyTransport == nil || client.Jar != nil { - proxyTransport = &http.Transport{ - Dial: DefaultDialer.Dial, - Proxy: http.ProxyURL(proxyUrl), - ProxyConnectHeader: proxyHeader, - } - proxyClient = &http.Client{Transport: proxyTransport, Jar: client.Jar} - } else if proxyTransport, ok := proxyTransport.(*http.Transport); ok { - proxyTransport.Proxy = http.ProxyURL(proxyUrl) - proxyTransport.ProxyConnectHeader = proxyHeader - } - transport = proxyTransport - client = proxyClient - } - - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - - if len(via) > r.MaxRedirects { - redirectFailed = true - return errors.New("Error redirecting. MaxRedirects reached") - } - - resUri = req.URL.String() - - //By default Golang will not redirect request headers - // https://code.google.com/p/go/issues/detail?id=4800&q=request%20header - if r.RedirectHeaders { - for key, val := range via[0].Header { - req.Header[key] = val - } - } - return nil - } - - if transport, ok := transport.(*http.Transport); ok { - if r.Insecure { - if transport.TLSClientConfig != nil { - transport.TLSClientConfig.InsecureSkipVerify = true - } else { - transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - } - } else if transport.TLSClientConfig != nil { - // the default TLS client (when transport.TLSClientConfig==nil) is - // already set to verify, so do nothing in that case - transport.TLSClientConfig.InsecureSkipVerify = false - } - } - - req, err := r.NewRequest() - - if err != nil { - // we couldn't parse the URL. 
- return nil, &Error{Err: err} - } - - timeout := false - if r.Timeout > 0 { - client.Timeout = r.Timeout - } - - if r.ShowDebug { - dump, err := httputil.DumpRequest(req, true) - if err != nil { - log.Println(err) - } - log.Println(string(dump)) - } - - if r.OnBeforeRequest != nil { - r.OnBeforeRequest(&r, req) - } - res, err := client.Do(req) - - if err != nil { - if !timeout { - if t, ok := err.(itimeout); ok { - timeout = t.Timeout() - } - if ue, ok := err.(*url.Error); ok { - if t, ok := ue.Err.(itimeout); ok { - timeout = t.Timeout() - } - } - } - - var response *Response - //If redirect fails we still want to return response data - if redirectFailed { - if res != nil { - response = &Response{res, resUri, &Body{reader: res.Body}, req} - } else { - response = &Response{res, resUri, nil, req} - } - } - - //If redirect fails and we haven't set a redirect count we shouldn't return an error - if redirectFailed && r.MaxRedirects == 0 { - return response, nil - } - - return response, &Error{timeout: timeout, Err: err} - } - - if r.Compression != nil && strings.Contains(res.Header.Get("Content-Encoding"), r.Compression.ContentEncoding) { - compressedReader, err := r.Compression.reader(res.Body) - if err != nil { - return nil, &Error{Err: err} - } - return &Response{res, resUri, &Body{reader: res.Body, compressedReader: compressedReader}, req}, nil - } - - return &Response{res, resUri, &Body{reader: res.Body}, req}, nil -} - -func (r Request) addHeaders(headersMap http.Header) { - if len(r.UserAgent) > 0 { - headersMap.Add("User-Agent", r.UserAgent) - } - if r.Accept != "" { - headersMap.Add("Accept", r.Accept) - } - if r.ContentType != "" { - headersMap.Add("Content-Type", r.ContentType) - } -} - -func (r Request) NewRequest() (*http.Request, error) { - - b, e := prepareRequestBody(r.Body) - if e != nil { - // there was a problem marshaling the body - return nil, &Error{Err: e} - } - - if r.QueryString != nil { - param, e := paramParse(r.QueryString) - if e != nil { - return nil, &Error{Err: e} - } - r.Uri = r.Uri + "?" + param - } - - var bodyReader io.Reader - if b != nil && r.Compression != nil { - buffer := bytes.NewBuffer([]byte{}) - readBuffer := bufio.NewReader(b) - writer, err := r.Compression.writer(buffer) - if err != nil { - return nil, &Error{Err: err} - } - _, e = readBuffer.WriteTo(writer) - writer.Close() - if e != nil { - return nil, &Error{Err: e} - } - bodyReader = buffer - } else { - bodyReader = b - } - - req, err := http.NewRequest(r.Method, r.Uri, bodyReader) - if err != nil { - return nil, err - } - // add headers to the request - req.Host = r.Host - - r.addHeaders(req.Header) - if r.Compression != nil { - req.Header.Add("Content-Encoding", r.Compression.ContentEncoding) - req.Header.Add("Accept-Encoding", r.Compression.ContentEncoding) - } - if r.headers != nil { - for _, header := range r.headers { - req.Header.Add(header.name, header.value) - } - } - - //use basic auth if required - if r.BasicAuthUsername != "" { - req.SetBasicAuth(r.BasicAuthUsername, r.BasicAuthPassword) - } - - for _, c := range r.cookies { - req.AddCookie(c) - } - return req, nil -} - -// Return value if nonempty, def otherwise. -func valueOrDefault(value, def string) string { - if value != "" { - return value - } - return def -} diff --git a/vendor/github.com/franela/goreq/tags.go b/vendor/github.com/franela/goreq/tags.go deleted file mode 100644 index f3c147e9..00000000 --- a/vendor/github.com/franela/goreq/tags.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2011 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found here: https://github.com/golang/go/blob/master/LICENSE - -package goreq - -import ( - "strings" - "unicode" -) - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index ba49e3c2..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,5 +0,0 @@ -root = true - -[*] -indent_style = tab -indent_size = 4 diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index 4cd0cbaf..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index 981d1bb8..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -sudo: false -language: go - -go: - - 1.8.x - - 1.9.x - - tip - -matrix: - allow_failures: - - go: tip - fast_finish: true - -before_script: - - go get -u github.com/golang/lint/golint - -script: - - go test -v --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - test -z "$(golint ./... | tee /dev/stderr)" - - go vet ./... - -os: - - linux - - osx - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 5ab5d41c..00000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index be4d7ea2..00000000 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,317 +0,0 @@ -# Changelog - -## v1.4.7 / 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## v1.4.2 / 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## v1.4.1 / 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## v1.4.0 / 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## v1.3.1 / 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## v1.3.0 / 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## v1.2.10 / 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## v1.2.9 / 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## v1.2.8 / 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## v1.2.5 / 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## v1.2.1 / 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## v1.2.0 / 
2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. 
- * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. - -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] 
kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* kqueue: add files when watch directory - -## v0.2.0 / 2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md deleted file mode 100644 index 828a60b2..00000000 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. 
-* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index f21e5408..00000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index 39932074..00000000 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# File system notifications for Go - -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) - -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: - -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. - -|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. 
The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). - -## FAQ - -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). - -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 - -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md - -## Related Projects - -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) - diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index ced39cb8..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 190bf0de..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "errors" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if op&Create == Create { - buffer.WriteString("|CREATE") - } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if op&Write == Write { - buffer.WriteString("|WRITE") - } - if op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - if buffer.Len() == 0 { - return "" - } - return buffer.String()[1:] // Strip leading pipe -} - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." -func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) -} - -// Common errors that can be reported by a watcher -var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index d9fd1b88..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
-func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
- return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index cc7db4b2..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(0) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 86e76a3d..00000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 7d8de145..00000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 9139e171..00000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 09436f31..00000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml deleted file mode 100644 index 841c4281..00000000 --- a/vendor/github.com/go-openapi/analysis/.codecov.yml +++ /dev/null @@ -1,5 +0,0 @@ -coverage: - status: - patch: - default: - target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore deleted file mode 100644 index 87c3bd3e..00000000 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -coverage.txt -*.cov -.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml deleted file mode 100644 index 76af8ab1..00000000 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 40 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits - # scopelint is useful, but also reports false positives - # that unfortunately can't be disabled. So we disable the - # linter rather than changing code that works. 
- # see: https://github.com/kyoh86/scopelint/issues/4 - - scopelint diff --git a/vendor/github.com/go-openapi/analysis/.travis.yml b/vendor/github.com/go-openapi/analysis/.travis.yml deleted file mode 100644 index 7ecf865c..00000000 --- a/vendor/github.com/go-openapi/analysis/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. 
- -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/analysis/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
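
For reference, the Windows watcher implementation removed earlier in this diff exposes fsnotify's usual surface: NewWatcher, Add, Remove, Close, and the Events/Errors channels. Below is a minimal usage sketch against that API now that the dependency is consumed from the module cache rather than vendor/; the watched path "." is a placeholder.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher picks the platform backend (ReadDirectoryChangesW on
	// Windows, as in the removed windows.go) and starts its event loop.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch a directory non-recursively; "." is only a placeholder.
	if err := watcher.Add("."); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return // Events is closed by Close()
			}
			// Op is a bitmask; test the bits you care about.
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```

The select loop is the same on every platform; on Windows the events behind it are produced by the I/O completion port loop in the removed readEvents.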
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index efafdf8f..00000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# OpenAPI initiative analysis [![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) [![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/analysis.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index 4d98718c..00000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,970 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - headerItems map[string]spec.Ref - parameterItems map[string]spec.Ref - allRefs map[string]spec.Ref - pathItems map[string]spec.Ref -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) - if location == "header" { - // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas - // and $ref are not supported here. However it is possible to analyze this. 
- r.headerItems["#"+key] = items.Ref - } else { - r.parameterItems["#"+key] = items.Ref - } -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { - r.pathItems["#"+key] = pathItem.Ref - r.addRef(key, pathItem.Ref) -} - -type patternAnalysis struct { - parameters map[string]string - headers map[string]string - items map[string]string - schemas map[string]string - allPatterns map[string]string -} - -func (p *patternAnalysis) addPattern(key, pattern string) { - p.allPatterns["#"+key] = pattern -} - -func (p *patternAnalysis) addParameterPattern(key, pattern string) { - p.parameters["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addHeaderPattern(key, pattern string) { - p.headers["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addItemsPattern(key, pattern string) { - p.items["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addSchemaPattern(key, pattern string) { - p.schemas["#"+key] = pattern - p.addPattern(key, pattern) -} - -type enumAnalysis struct { - parameters map[string][]interface{} - headers map[string][]interface{} - items map[string][]interface{} - schemas map[string][]interface{} - allEnums map[string][]interface{} -} - -func (p *enumAnalysis) addEnum(key string, enum []interface{}) { - p.allEnums["#"+key] = enum -} - -func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { - p.parameters["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { - p.headers["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { - p.items["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { - p.schemas["#"+key] = enum - p.addEnum(key, enum) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. -func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - a.reset() - a.initialize() - return a -} - -// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec. 
-type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - patterns patternAnalysis - enums enumAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) reset() { - s.consumes = make(map[string]struct{}, 150) - s.produces = make(map[string]struct{}, 150) - s.authSchemes = make(map[string]struct{}, 150) - s.operations = make(map[string]map[string]*spec.Operation, 150) - s.allSchemas = make(map[string]SchemaRef, 150) - s.allOfs = make(map[string]SchemaRef, 150) - s.references.schemas = make(map[string]spec.Ref, 150) - s.references.pathItems = make(map[string]spec.Ref, 150) - s.references.responses = make(map[string]spec.Ref, 150) - s.references.parameters = make(map[string]spec.Ref, 150) - s.references.items = make(map[string]spec.Ref, 150) - s.references.headerItems = make(map[string]spec.Ref, 150) - s.references.parameterItems = make(map[string]spec.Ref, 150) - s.references.allRefs = make(map[string]spec.Ref, 150) - s.patterns.parameters = make(map[string]string, 150) - s.patterns.headers = make(map[string]string, 150) - s.patterns.items = make(map[string]string, 150) - s.patterns.schemas = make(map[string]string, 150) - s.patterns.allPatterns = make(map[string]string, 150) - s.enums.parameters = make(map[string][]interface{}, 150) - s.enums.headers = make(map[string][]interface{}, 150) - s.enums.items = make(map[string][]interface{}, 150) - s.enums.schemas = make(map[string][]interface{}, 150) - s.enums.allEnums = make(map[string][]interface{}, 150) -} - -func (s *Spec) reload() { - s.reset() - s.initialize() -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref, "parameter") - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", *parameter.Schema, refPref) - } - if parameter.Pattern != "" { - s.patterns.addParameterPattern(refPref, parameter.Pattern) - } - if len(parameter.Enum) > 0 { - s.enums.addParameterEnum(refPref, parameter.Enum) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for k, v := range response.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - if v.Items != nil { - s.analyzeItems("items", v.Items, hRefPref, "header") - } - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", *response.Schema, refPref) - } - } - - for name, schema := range s.spec.Definitions { - s.analyzeSchema(name, schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: 
resolve refs here? - // Currently, operations declared via pathItem $ref are known only after expansion - op := pi - if pi.Ref.String() != "" { - key := slashpath.Join("/paths", jsonpointer.Escape(path)) - s.references.addPathItemRef(key, pi) - } - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref, "parameter") - } - if param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref, location) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items, location) - } - if items.Pattern != "" { - s.patterns.addItemsPattern(refPref, items.Pattern) - } - if len(items.Enum) > 0 { - s.enums.addItemsEnum(refPref, items.Enum) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - s.analyzeItems("items", param.Items, refPref, "parameter") - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } - if op.Responses != nil { - if op.Responses.Default != nil { - refPref := slashpath.Join(prefix, "responses", "default") - if op.Responses.Default.Ref.String() != "" { - s.references.addResponseRef(refPref, op.Responses.Default) - } - for k, v := range op.Responses.Default.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - } - if op.Responses.Default.Schema != nil { - s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) - } - } - for k, res := range op.Responses.StatusCodeResponses { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) - } - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, 
"headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - if res.Schema != nil { - s.analyzeSchema("schema", *res.Schema, refPref) - } - } - } -} - -func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: &schema, - Ref: spec.MustCreateRef("#" + refURI), - TopLevel: prefix == "/definitions", - } - - s.allSchemas["#"+refURI] = schRef - - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - if schema.Pattern != "" { - s.patterns.addSchemaPattern(refURI, schema.Pattern) - } - if len(schema.Enum) > 0 { - s.enums.addSchemaEnum(refURI, schema.Enum) - } - - for k, v := range schema.Definitions { - s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) - } - for k, v := range schema.Properties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) - } - for k, v := range schema.PatternProperties { - // NOTE: swagger 2.0 does not support PatternProperties. - // However it is possible to analyze this in a schema - s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) - } - for i, v := range schema.AllOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = schRef - } - for i, v := range schema.AnyOf { - // NOTE: swagger 2.0 does not support anyOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - for i, v := range schema.OneOf { - // NOTE: swagger 2.0 does not support oneOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - if schema.Not != nil { - // NOTE: swagger 2.0 does not support "not" constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema("not", *schema.Not, refURI) - } - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) - } - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - // NOTE: swagger 2.0 does not support AdditionalItems. 
- // However it is possible to analyze this in a schema - s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) - } - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", *schema.Items.Schema, refURI) - } - for i, sch := range schema.Items.Schemas { - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - result := [][]SecurityRequirement{} - for _, scheme := range schemes { - if len(scheme) == 0 { - // append a zero object for anonymous - result = append(result, []SecurityRequirement{{}}) - continue - } - var reqs []SecurityRequirement - for k, v := range scheme { - if v == nil { - v = []string{} - } - reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) - } - result = append(result, reqs) - } - return result -} - -// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { - result := make(map[string]spec.SecurityScheme) - - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - - result := make(map[string]spec.SecurityScheme) - for _, reqs := range requirements { - for _, v := range reqs { - if v.Name == "" { - // optional requirement - continue - } - if _, ok := result[v.Name]; ok { - // duplicate requirement - continue - } - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - } - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) string { - return fmt.Sprintf("%s#%s", param.In, 
fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - // TODO: this should be x-go-name - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - return swag.ToGoName(param.Name) -} - -// ErrorOnParamFunc is a callback function to be invoked -// whenever an error is encountered while resolving references -// on parameters. -// -// This function takes as input the spec.Parameter which triggered the -// error and the error itself. -// -// If the callback function returns false, the calling function should bail. -// -// If it returns true, the calling function should continue evaluating parameters. -// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). -type ErrorOnParamFunc func(spec.Parameter, error) bool - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { - for _, param := range parameters { - pr := param - if pr.Ref.String() != "" { - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - if callmeOnError != nil { - if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { - continue - } - break - } else { - panic(fmt.Sprintf("invalid reference: %q", pr.Ref.String())) - } - } - if objAsParam, ok := obj.(spec.Parameter); ok { - pr = objAsParam - } else { - if callmeOnError != nil { - if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { - continue - } - break - } else { - panic(fmt.Sprintf("resolved reference is not a parameter: %q", pr.Ref.String())) - } - } - } - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - return s.SafeParametersFor(operationID, nil) -} - -// SafeParametersFor the specified operation id. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag, callmeOnError) - s.paramsAsMap(op.Parameters, bag, callmeOnError) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - return res - } - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) - } - } - return nil -} - -// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - return s.SafeParamsFor(method, path, nil) -} - -// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res, callmeOnError) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) - } - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - return op, fn - } - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - return result -} - -// OperationMethodPaths gets all the operation ids based on method an dpath -func (s *Spec) OperationMethodPaths() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p := range v { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return s.structMapKeys(s.produces) -} - -// 
RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema - TopLevel bool -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - return -} - -// AllPathItemReferences returns the references for all the items -func (s *Spec) AllPathItemReferences() (result []string) { - for _, v := range s.references.pathItems { - result = append(result, v.String()) - } - return -} - -// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). -// -// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid -// Swagger 2.0 spec. 
-func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - return -} - -// AllReferences returns all the references found in the document, with possible duplicates -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - return -} - -func cloneStringMap(source map[string]string) map[string]string { - res := make(map[string]string, len(source)) - for k, v := range source { - res[k] = v - } - return res -} - -func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { - res := make(map[string][]interface{}, len(source)) - for k, v := range source { - res[k] = v - } - return res -} - -// ParameterPatterns returns all the patterns found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterPatterns() map[string]string { - return cloneStringMap(s.patterns.parameters) -} - -// HeaderPatterns returns all the patterns found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderPatterns() map[string]string { - return cloneStringMap(s.patterns.headers) -} - -// ItemsPatterns returns all the patterns found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsPatterns() map[string]string { - return cloneStringMap(s.patterns.items) -} - -// SchemaPatterns returns all the patterns found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaPatterns() map[string]string { - return cloneStringMap(s.patterns.schemas) -} - -// AllPatterns returns all the patterns found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllPatterns() map[string]string { - return cloneStringMap(s.patterns.allPatterns) -} - -// ParameterEnums returns all the enums found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.parameters) -} - -// HeaderEnums returns all the enums found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.headers) -} - -// ItemsEnums returns all the enums found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.items) -} - -// SchemaEnums returns all the enums found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.schemas) -} - -// AllEnums returns all the enums found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.allEnums) -} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml deleted file mode 100644 index 3239d744..00000000 --- a/vendor/github.com/go-openapi/analysis/appveyor.yml +++ /dev/null @@ -1,33 +0,0 @@ -version: "0.1.{build}" - -clone_folder: 
C:\go-openapi\analysis -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.12 - -test_script: - - go test -v -timeout 20m ./... -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go deleted file mode 100644 index 84cc4e54..00000000 --- a/vendor/github.com/go-openapi/analysis/debug.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "log" - "os" - "path/filepath" - "runtime" -) - -var ( - // Debug is true when the SWAGGER_DEBUG env var is not empty. - // It enables a more verbose logging of the spec analyzer. - Debug = os.Getenv("SWAGGER_DEBUG") != "" - // analysisLogger is a debug logger for this package - analysisLogger *log.Logger -) - -func init() { - debugOptions() -} - -func debugOptions() { - analysisLogger = log.New(os.Stdout, "analysis:", log.LstdFlags) -} - -func debugLog(msg string, args ...interface{}) { - // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() - if Debug { - _, file1, pos1, _ := runtime.Caller(1) - analysisLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go deleted file mode 100644 index d5294c09..00000000 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package analysis provides methods to work with a Swagger specification document from -package go-openapi/spec. - -Analyzing a specification - -An analysed specification object (type Spec) provides methods to work with swagger definition. - -Flattening or expanding a specification - -Flattening a specification bundles all remote $ref in the main spec document. -Depending on flattening options, additional preprocessing may take place: - - full flattening: replacing all inline complex constructs by a named entry in #/definitions - - expand: replace all $ref's in the document by their expanded content - -Merging several specifications - -Mixin several specifications merges all Swagger constructs, and warns about found conflicts. - -Fixing a specification - -Unmarshalling a specification with golang json unmarshalling may lead to -some unwanted result on present but empty fields. - -Analyzing a Swagger schema - -Swagger schemas are analyzed to determine their complexity and qualify their content. -*/ -package analysis diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go deleted file mode 100644 index bfe014ca..00000000 --- a/vendor/github.com/go-openapi/analysis/fixer.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import "github.com/go-openapi/spec" - -// FixEmptyResponseDescriptions replaces empty ("") response -// descriptions in the input with "(empty)" to ensure that the -// resulting Swagger is stays valid. The problem appears to arise -// from reading in valid specs that have a explicit response -// description of "" (valid, response.description is required), but -// due to zero values being omitted upon re-serializing (omitempty) we -// lose them unless we stick some chars in there. 
-func FixEmptyResponseDescriptions(s *spec.Swagger) { - if s.Paths != nil { - for _, v := range s.Paths.Paths { - if v.Get != nil { - FixEmptyDescs(v.Get.Responses) - } - if v.Put != nil { - FixEmptyDescs(v.Put.Responses) - } - if v.Post != nil { - FixEmptyDescs(v.Post.Responses) - } - if v.Delete != nil { - FixEmptyDescs(v.Delete.Responses) - } - if v.Options != nil { - FixEmptyDescs(v.Options.Responses) - } - if v.Head != nil { - FixEmptyDescs(v.Head.Responses) - } - if v.Patch != nil { - FixEmptyDescs(v.Patch.Responses) - } - } - } - for k, v := range s.Responses { - FixEmptyDesc(&v) - s.Responses[k] = v - } -} - -// FixEmptyDescs adds "(empty)" as the description for any Response in -// the given Responses object that doesn't already have one. -func FixEmptyDescs(rs *spec.Responses) { - FixEmptyDesc(rs.Default) - for k, v := range rs.StatusCodeResponses { - FixEmptyDesc(&v) - rs.StatusCodeResponses[k] = v - } -} - -// FixEmptyDesc adds "(empty)" as the description to the given -// Response object if it doesn't already have one and isn't a -// ref. No-op on nil input. -func FixEmptyDesc(rs *spec.Response) { - if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { - return - } - rs.Description = "(empty)" -} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go deleted file mode 100644 index ae1eef5d..00000000 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ /dev/null @@ -1,1732 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "log" - "net/http" - "net/url" - "os" - slashpath "path" - "path/filepath" - "sort" - "strings" - - "strconv" - - "github.com/go-openapi/analysis/internal" - "github.com/go-openapi/jsonpointer" - swspec "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// FlattenOpts configuration for flattening a swagger specification. -type FlattenOpts struct { - Spec *Spec // The analyzed spec to work with - flattenContext *context // Internal context to track flattening activity - - BasePath string - - // Flattening options - Expand bool // If Expand is true, we skip flattening the spec and expand it instead - Minimal bool - Verbose bool - RemoveUnused bool - - /* Extra keys */ - _ struct{} // require keys -} - -// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. 
-func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *swspec.ExpandOptions { - return &swspec.ExpandOptions{RelativeBase: f.BasePath, SkipSchemas: skipSchemas} -} - -// Swagger gets the swagger specification for this flatten operation -func (f *FlattenOpts) Swagger() *swspec.Swagger { - return f.Spec.spec -} - -// newRef stores information about refs created during the flattening process -type newRef struct { - key string - newName string - path string - isOAIGen bool - resolved bool - schema *swspec.Schema - parents []string -} - -// context stores intermediary results from flatten -type context struct { - newRefs map[string]*newRef - warnings []string - resolved map[string]string -} - -func newContext() *context { - return &context{ - newRefs: make(map[string]*newRef, 150), - warnings: make([]string, 0), - resolved: make(map[string]string, 50), - } -} - -// Flatten an analyzed spec and produce a self-contained spec bundle. -// -// There is a minimal and a full flattening mode. -// -// Minimally flattening a spec means: -// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left -// unscathed) -// - Importing external (http, file) references so they become internal to the document -// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers -// like "$ref": "#/definitions/myObject/allOfs/1") -// -// A minimally flattened spec thus guarantees the following properties: -// - all $refs point to a local definition (i.e. '#/definitions/...') -// - definitions are unique -// -// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they -// represent a complex schema or express commonality in the spec. -// Otherwise, they are simply expanded. -// -// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. -// -// Fully flattening a spec means: -// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. -// -// By complex, we mean every JSON object with some properties. -// Arrays, when they do not define a tuple, -// or empty objects with or without additionalProperties, are not considered complex and remain inline. -// -// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions -// have been created. -// -// Available flattening options: -// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched -// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) -// - Verbose: croaks about name conflicts detected -// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening -// -// NOTE: expansion removes all $ref save circular $ref, which remain in place -// -// TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a -// x-go-name extension -// - LiftAllOfs: -// - limit the flattening of allOf members when simple objects -// - merge allOf with validation only -// - merge allOf with extensions only -// - ... 
-// -func Flatten(opts FlattenOpts) error { - // Make sure opts.BasePath is an absolute path - if !filepath.IsAbs(opts.BasePath) { - cwd, _ := os.Getwd() - opts.BasePath = filepath.Join(cwd, opts.BasePath) - } - // make sure drive letter on windows is normalized to lower case - u, _ := url.Parse(opts.BasePath) - opts.BasePath = u.String() - - opts.flattenContext = newContext() - - // recursively expand responses, parameters, path items and items in simple schemas. - // This simplifies the spec and leaves $ref only into schema objects. - if err := swspec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { - return err - } - - // strip current file from $ref's, so we can recognize them as proper definitions - // In particular, this works around for issue go-openapi/spec#76: leading absolute file in $ref is stripped - if err := normalizeRef(&opts); err != nil { - return err - } - - if opts.RemoveUnused { - // optionally removes shared parameters and responses already expanded (now unused) - // default parameters (i.e. under paths) remain. - opts.Swagger().Parameters = nil - opts.Swagger().Responses = nil - } - - opts.Spec.reload() // re-analyze - - // at this point there are no references left but in schemas - - for imported := false; !imported; { - // iteratively import remote references until none left. - // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") - var err error - if imported, err = importExternalReferences(&opts); err != nil { - return err - } - opts.Spec.reload() // re-analyze - } - - if !opts.Minimal && !opts.Expand { - // full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) - if err := nameInlinedSchemas(&opts); err != nil { - return err - } - - opts.Spec.reload() // re-analyze - } - - // rewrite JSON pointers other than $ref to named definitions - // and attempt to resolve conflicting names whenever possible. - if err := stripPointersAndOAIGen(&opts); err != nil { - return err - } - - if opts.RemoveUnused { - // remove unused definitions - expected := make(map[string]struct{}) - for k := range opts.Swagger().Definitions { - expected[slashpath.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} - } - for _, k := range opts.Spec.AllDefinitionReferences() { - delete(expected, k) - } - for k := range expected { - debugLog("removing unused definition %s", slashpath.Base(k)) - if opts.Verbose { - log.Printf("info: removing unused definition: %s", slashpath.Base(k)) - } - delete(opts.Swagger().Definitions, slashpath.Base(k)) - } - opts.Spec.reload() // re-analyze - } - - // TODO: simplify known schema patterns to flat objects with properties - // examples: - // - lift simple allOf object, - // - empty allOf with validation only or extensions only - // - rework allOf arrays - // - rework allOf additionalProperties - - if opts.Verbose { - // issue notifications - croak(&opts) - } - return nil -} - -// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
-// -// Complex means the schema is any of: -// - a simple type (primitive) -// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) -// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will -// generate a definition) -func isAnalyzedAsComplex(asch *AnalyzedSchema) bool { - if !asch.IsSimpleSchema && !asch.IsArray && !asch.IsMap { - return true - } - return false -} - -// nameInlinedSchemas replaces every complex inline construct by a named definition. -func nameInlinedSchemas(opts *FlattenOpts) error { - debugLog("nameInlinedSchemas") - namer := &inlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: opRefsByRef(gatherOperations(opts.Spec, nil)), - flattenContext: opts.flattenContext, - opts: opts, - } - depthFirst := sortDepthFirst(opts.Spec.allSchemas) - for _, key := range depthFirst { - sch := opts.Spec.allSchemas[key] - if sch.Schema != nil && sch.Schema.Ref.String() == "" && !sch.TopLevel { // inline schema - asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return fmt.Errorf("schema analysis [%s]: %v", key, err) - } - - if isAnalyzedAsComplex(asch) { // move complex schemas to definitions - if err := namer.Name(key, sch.Schema, asch); err != nil { - return err - } - } - } - } - return nil -} - -var depthGroupOrder = []string{ - "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", -} - -func sortDepthFirst(data map[string]SchemaRef) []string { - // group by category (shared params, op param, statuscode response, default response, definitions) - // sort groups internally by number of parts in the key and lexical names - // flatten groups into a single list of keys - sorted := make([]string, 0, len(data)) - grouped := make(map[string]keys, len(data)) - for k := range data { - split := keyParts(k) - var pk string - if split.IsSharedOperationParam() { - pk = "sharedOpParam" - } - if split.IsOperationParam() { - pk = "opParam" - } - if split.IsStatusCodeResponse() { - pk = "codeResponse" - } - if split.IsDefaultResponse() { - pk = "defaultResponse" - } - if split.IsDefinition() { - pk = "definition" - } - if split.IsSharedParam() { - pk = "sharedParam" - } - if split.IsSharedResponse() { - pk = "sharedResponse" - } - grouped[pk] = append(grouped[pk], key{Segments: len(split), Key: k}) - } - - for _, pk := range depthGroupOrder { - res := grouped[pk] - sort.Sort(res) - for _, v := range res { - sorted = append(sorted, v.Key) - } - } - return sorted -} - -type key struct { - Segments int - Key string -} -type keys []key - -func (k keys) Len() int { return len(k) } -func (k keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k keys) Less(i, j int) bool { - return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) -} - -type inlineSchemaNamer struct { - Spec *swspec.Swagger - Operations map[string]opRef - flattenContext *context - opts *FlattenOpts -} - -func opRefsByRef(oprefs map[string]opRef) map[string]opRef { - result := make(map[string]opRef, len(oprefs)) - for _, v := range oprefs { - result[v.Ref.String()] = v - } - return result -} - -func (isn *inlineSchemaNamer) Name(key string, schema *swspec.Schema, aschema *AnalyzedSchema) error { - debugLog("naming inlined schema at %s", key) - - parts := keyParts(key) - for _, name := range namesFromKey(parts, aschema, isn.Operations) { - if name != "" { - // 
create unique name - newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) - - // clone schema - sch, err := cloneSchema(schema) - if err != nil { - return err - } - - // replace values on schema - if err := rewriteSchemaToRef(isn.Spec, key, - swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { - return fmt.Errorf("error while creating definition %q from inline schema: %v", newName, err) - } - - // rewrite any dependent $ref pointing to this place, - // when not already pointing to a top-level definition. - // - // NOTE: this is important if such referers use arbitrary JSON pointers. - an := New(isn.Spec) - for k, v := range an.references.allRefs { - r, _, erd := deepestRef(isn.opts, v) - if erd != nil { - return fmt.Errorf("at %s, %v", k, erd) - } - if r.String() == key || - r.String() == slashpath.Join(definitionsPath, newName) && - slashpath.Dir(v.String()) != definitionsPath { - debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String()) - - // rewrite $ref to the new target - if err := updateRef(isn.Spec, k, - swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { - return err - } - } - } - - // NOTE: this extension is currently not used by go-swagger (provided for information only) - sch.AddExtension("x-go-gen-location", genLocation(parts)) - - // save cloned schema to definitions - saveSchema(isn.Spec, newName, sch) - - // keep track of created refs - if isn.flattenContext != nil { - debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen) - resolved := false - if _, ok := isn.flattenContext.newRefs[key]; ok { - resolved = isn.flattenContext.newRefs[key].resolved - } - isn.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: slashpath.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - } - } - return nil -} - -// genLocation indicates from which section of the specification (models or operations) a definition has been created. -// -// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided -// for information only. 
-func genLocation(parts splitKey) string { - if parts.IsOperation() { - return "operations" - } - if parts.IsDefinition() { - return "models" - } - return "" -} - -// uniqifyName yields a unique name for a definition -func uniqifyName(definitions swspec.Definitions, name string) (string, bool) { - isOAIGen := false - if name == "" { - name = "oaiGen" - isOAIGen = true - } - if len(definitions) == 0 { - return name, isOAIGen - } - - unq := true - for k := range definitions { - if strings.EqualFold(k, name) { - unq = false - break - } - } - - if unq { - return name, isOAIGen - } - - name += "OAIGen" - isOAIGen = true - var idx int - unique := name - _, known := definitions[unique] - for known { - idx++ - unique = fmt.Sprintf("%s%d", name, idx) - _, known = definitions[unique] - } - return unique, isOAIGen -} - -func namesFromKey(parts splitKey, aschema *AnalyzedSchema, operations map[string]opRef) []string { - var baseNames [][]string - var startIndex int - if parts.IsOperation() { - // params - if parts.IsOperationParam() || parts.IsSharedOperationParam() { - piref := parts.PathItemRef() - if piref.String() != "" && parts.IsOperationParam() { - if op, ok := operations[piref.String()]; ok { - startIndex = 5 - baseNames = append(baseNames, []string{op.ID, "params", "body"}) - } - } else if parts.IsSharedOperationParam() { - pref := parts.PathRef() - for k, v := range operations { - if strings.HasPrefix(k, pref.String()) { - startIndex = 4 - baseNames = append(baseNames, []string{v.ID, "params", "body"}) - } - } - } - } - // responses - if parts.IsOperationResponse() { - piref := parts.PathItemRef() - if piref.String() != "" { - if op, ok := operations[piref.String()]; ok { - startIndex = 6 - baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) - } - } - } - } - - // definitions - if parts.IsDefinition() { - nm := parts.DefinitionName() - if nm != "" { - startIndex = 2 - baseNames = append(baseNames, []string{parts.DefinitionName()}) - } - } - - var result []string - for _, segments := range baseNames { - nm := parts.BuildName(segments, startIndex, aschema) - if nm != "" { - result = append(result, nm) - } - } - sort.Strings(result) - return result -} - -const ( - paths = "paths" - responses = "responses" - parameters = "parameters" - definitions = "definitions" - definitionsPath = "#/definitions" -) - -var ( - ignoredKeys map[string]struct{} - validMethods map[string]struct{} -) - -func init() { - ignoredKeys = map[string]struct{}{ - "schema": {}, - "properties": {}, - "not": {}, - "anyOf": {}, - "oneOf": {}, - } - - validMethods = map[string]struct{}{ - "GET": {}, - "HEAD": {}, - "OPTIONS": {}, - "PATCH": {}, - "POST": {}, - "PUT": {}, - "DELETE": {}, - } -} - -type splitKey []string - -func (s splitKey) IsDefinition() bool { - return len(s) > 1 && s[0] == definitions -} - -func (s splitKey) DefinitionName() string { - if !s.IsDefinition() { - return "" - } - return s[1] -} - -func (s splitKey) isKeyName(i int) bool { - if i <= 0 { - return false - } - count := 0 - for idx := i - 1; idx > 0; idx-- { - if s[idx] != "properties" { - break - } - count++ - } - - return count%2 != 0 -} - -func (s splitKey) BuildName(segments []string, startIndex int, aschema *AnalyzedSchema) string { - for i, part := range s[startIndex:] { - if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { - if part == "items" || part == "additionalItems" { - if aschema.IsTuple || aschema.IsTupleWithExtra { - segments = append(segments, "tuple") - } else { - segments = 
append(segments, "items") - } - if part == "additionalItems" { - segments = append(segments, part) - } - continue - } - segments = append(segments, part) - } - } - return strings.Join(segments, " ") -} - -func (s splitKey) IsOperation() bool { - return len(s) > 1 && s[0] == paths -} - -func (s splitKey) IsSharedOperationParam() bool { - return len(s) > 2 && s[0] == paths && s[2] == parameters -} - -func (s splitKey) IsSharedParam() bool { - return len(s) > 1 && s[0] == parameters -} - -func (s splitKey) IsOperationParam() bool { - return len(s) > 3 && s[0] == paths && s[3] == parameters -} - -func (s splitKey) IsOperationResponse() bool { - return len(s) > 3 && s[0] == paths && s[3] == responses -} - -func (s splitKey) IsSharedResponse() bool { - return len(s) > 1 && s[0] == responses -} - -func (s splitKey) IsDefaultResponse() bool { - return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" -} - -func (s splitKey) IsStatusCodeResponse() bool { - isInt := func() bool { - _, err := strconv.Atoi(s[4]) - return err == nil - } - return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() -} - -func (s splitKey) ResponseName() string { - if s.IsStatusCodeResponse() { - code, _ := strconv.Atoi(s[4]) - return http.StatusText(code) - } - if s.IsDefaultResponse() { - return "Default" - } - return "" -} - -func (s splitKey) PathItemRef() swspec.Ref { - if len(s) < 3 { - return swspec.Ref{} - } - pth, method := s[1], s[2] - if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { - return swspec.Ref{} - } - return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) -} - -func (s splitKey) PathRef() swspec.Ref { - if !s.IsOperation() { - return swspec.Ref{} - } - return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(s[1]))) -} - -func keyParts(key string) splitKey { - var res []string - for _, part := range strings.Split(key[1:], "/") { - if part != "" { - res = append(res, jsonpointer.Unescape(part)) - } - } - return res -} - -func rewriteSchemaToRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { - debugLog("rewriting schema to ref for %s with %s", key, ref.String()) - _, value, err := getPointerFromKey(spec, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *swspec.Schema: - return rewriteParentRef(spec, key, ref) - - case swspec.Schema: - return rewriteParentRef(spec, key, ref) - - case *swspec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - } - - case *swspec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - } - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -func rewriteParentRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { - parent, entry, pvalue, err := getParentFromKey(spec, key) - if err != nil { - return err - } - - debugLog("rewriting holder for %T", pvalue) - switch container := pvalue.(type) { - case swspec.Response: - if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { - return err - } - - case *swspec.Response: - container.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case *swspec.Responses: - statusCode, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - resp := 
container.StatusCodeResponses[statusCode] - resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container.StatusCodeResponses[statusCode] = resp - - case map[string]swspec.Response: - resp := container[entry] - resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container[entry] = resp - - case swspec.Parameter: - if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { - return err - } - - case map[string]swspec.Parameter: - param := container[entry] - param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container[entry] = param - - case []swspec.Parameter: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - param := container[idx] - param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container[idx] = param - - case swspec.Definitions: - container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case map[string]swspec.Schema: - container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case []swspec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case *swspec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) - } - return nil -} - -func cloneSchema(schema *swspec.Schema) (*swspec.Schema, error) { - var sch swspec.Schema - if err := swag.FromDynamicJSON(schema, &sch); err != nil { - return nil, fmt.Errorf("cannot clone schema: %v", err) - } - return &sch, nil -} - -// importExternalReferences iteratively digs remote references and imports them into the main schema. -// -// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. -// -// This returns true when no more remote references can be found. 
-func importExternalReferences(opts *FlattenOpts) (bool, error) { - debugLog("importExternalReferences") - - groupedRefs := reverseIndexForSchemaRefs(opts) - sortedRefStr := make([]string, 0, len(groupedRefs)) - if opts.flattenContext == nil { - opts.flattenContext = newContext() - } - - // sort $ref resolution to ensure deterministic name conflict resolution - for refStr := range groupedRefs { - sortedRefStr = append(sortedRefStr, refStr) - } - sort.Strings(sortedRefStr) - - complete := true - - for _, refStr := range sortedRefStr { - entry := groupedRefs[refStr] - if entry.Ref.HasFragmentOnly { - continue - } - complete = false - var isOAIGen bool - - newName := opts.flattenContext.resolved[refStr] - if newName != "" { - // rewrite ref with already resolved external ref (useful for cyclical refs): - // rewrite external refs to local ones - debugLog("resolving known ref [%s] to %s", refStr, newName) - for _, key := range entry.Keys { - if err := updateRef(opts.Swagger(), key, - swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { - return false, err - } - } - } else { - // resolve schemas - debugLog("resolving schema from remote $ref [%s]", refStr) - sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) - if err != nil { - return false, fmt.Errorf("could not resolve schema: %v", err) - } - - // at this stage only $ref analysis matters - partialAnalyzer := &Spec{ - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - partialAnalyzer.reset() - partialAnalyzer.analyzeSchema("", *sch, "/") - - // now rewrite those refs with rebase - for key, ref := range partialAnalyzer.references.allRefs { - if err := updateRef(sch, key, swspec.MustCreateRef(rebaseRef(entry.Ref.String(), ref.String()))); err != nil { - return false, fmt.Errorf("failed to rewrite ref for key %q at %s: %v", key, entry.Ref.String(), err) - } - } - - // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name - newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) - debugLog("new name for [%s]: %s - with name conflict:%t", - strings.Join(entry.Keys, ", "), newName, isOAIGen) - - opts.flattenContext.resolved[refStr] = newName - - // rewrite the external refs to local ones - for _, key := range entry.Keys { - if err := updateRef(opts.Swagger(), key, - swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { - return false, err - } - - // keep track of created refs - resolved := false - if _, ok := opts.flattenContext.newRefs[key]; ok { - resolved = opts.flattenContext.newRefs[key].resolved - } - opts.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: slashpath.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - // add the resolved schema to the definitions - saveSchema(opts.Swagger(), newName, sch) - } - } - // maintains ref index entries - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - - // update tracking with resolved schemas - if r.schema.Ref.String() != "" { - ref := swspec.MustCreateRef(r.path) - sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) - if err != nil { - return false, fmt.Errorf("could not resolve schema: %v", err) - } - r.schema = sch - } - // update tracking with renamed keys: got a cascade of refs - if r.path != k { - renamed := *r - renamed.key = r.path - 
opts.flattenContext.newRefs[renamed.path] = &renamed - - // indirect ref - r.newName = slashpath.Base(k) - r.schema = swspec.RefSchema(r.path) - r.path = k - r.isOAIGen = strings.Contains(k, "OAIGen") - } - } - - return complete, nil -} - -type refRevIdx struct { - Ref swspec.Ref - Keys []string -} - -// rebaseRef rebase a remote ref relative to a base ref. -// -// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). -// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func rebaseRef(baseRef string, ref string) string { - debugLog("rebasing ref: %s onto %s", ref, baseRef) - baseRef, _ = url.PathUnescape(baseRef) - ref, _ = url.PathUnescape(ref) - if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { - return ref - } - - parts := strings.Split(ref, "#") - - baseParts := strings.Split(baseRef, "#") - baseURL, _ := url.Parse(baseParts[0]) - if strings.HasPrefix(ref, "#") { - if baseURL.Host == "" { - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - - refURL, _ := url.Parse(parts[0]) - if refURL.Host != "" || filepath.IsAbs(parts[0]) { - // not rebasing an absolute path - return ref - } - - // there is a relative path - var basePath string - if baseURL.Host != "" { - // when there is a host, standard URI rules apply (with "/") - baseURL.Path = slashpath.Dir(baseURL.Path) - baseURL.Path = slashpath.Join(baseURL.Path, "/"+parts[0]) - return baseURL.String() - } - - // this is a local relative path - // basePart[0] and parts[0] are local filesystem directories/files - basePath = filepath.Dir(baseParts[0]) - relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) - if len(parts) > 1 { - return strings.Join([]string{relPath, parts[1]}, "#") - } - return relPath -} - -// normalizePath renders absolute path on remote file refs -// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func normalizePath(ref swspec.Ref, opts *FlattenOpts) (normalizedPath string) { - uri, _ := url.PathUnescape(ref.String()) - if ref.HasFragmentOnly || filepath.IsAbs(uri) { - normalizedPath = uri - return - } - - refURL, _ := url.Parse(uri) - if refURL.Host != "" { - normalizedPath = uri - return - } - - parts := strings.Split(uri, "#") - // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage - parts[0] = filepath.Join(filepath.Dir(opts.BasePath), parts[0]) - normalizedPath = strings.Join(parts, "#") - return -} - -func reverseIndexForSchemaRefs(opts *FlattenOpts) map[string]refRevIdx { - collected := make(map[string]refRevIdx) - for key, schRef := range opts.Spec.references.schemas { - // normalize paths before sorting, - // so we get together keys in same external file - normalizedPath := normalizePath(schRef, opts) - if entry, ok := collected[normalizedPath]; ok { - entry.Keys = append(entry.Keys, key) - collected[normalizedPath] = entry - } else { - collected[normalizedPath] = refRevIdx{ - Ref: schRef, - Keys: []string{key}, - } - } - } - return collected -} - -func nameFromRef(ref swspec.Ref) string { - u := ref.GetURL() - if u.Fragment != "" { - return swag.ToJSONName(slashpath.Base(u.Fragment)) - } - if u.Path != "" { - bn := slashpath.Base(u.Path) - if bn != "" && bn != "/" { - ext := 
slashpath.Ext(bn) - if ext != "" { - return swag.ToJSONName(bn[:len(bn)-len(ext)]) - } - return swag.ToJSONName(bn) - } - } - return swag.ToJSONName(strings.Replace(u.Host, ".", " ", -1)) -} - -func saveSchema(spec *swspec.Swagger, name string, schema *swspec.Schema) { - if schema == nil { - return - } - if spec.Definitions == nil { - spec.Definitions = make(map[string]swspec.Schema, 150) - } - spec.Definitions[name] = *schema -} - -// getPointerFromKey retrieves the content of the JSON pointer "key" -func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) { - switch spec.(type) { - case *swspec.Schema: - case *swspec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - if key == "#/" { - return "", spec, nil - } - // unescape chars in key, e.g. "{}" from path params - pth, _ := internal.PathUnescape(key[1:]) - ptr, err := jsonpointer.New(pth) - if err != nil { - return "", nil, err - } - - value, _, err := ptr.Get(spec) - if err != nil { - debugLog("error when getting key: %s with path: %s", key, pth) - return "", nil, err - } - return pth, value, nil -} - -// getParentFromKey retrieves the container of the JSON pointer "key" -func getParentFromKey(spec interface{}, key string) (string, string, interface{}, error) { - switch spec.(type) { - case *swspec.Schema: - case *swspec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - // unescape chars in key, e.g. "{}" from path params - pth, _ := internal.PathUnescape(key[1:]) - - parent, entry := slashpath.Dir(pth), slashpath.Base(pth) - debugLog("getting schema holder at: %s, with entry: %s", parent, entry) - - pptr, err := jsonpointer.New(parent) - if err != nil { - return "", "", nil, err - } - pvalue, _, err := pptr.Get(spec) - if err != nil { - return "", "", nil, fmt.Errorf("can't get parent for %s: %v", parent, err) - } - return parent, entry, pvalue, nil -} - -// updateRef replaces a ref by another one -func updateRef(spec interface{}, key string, ref swspec.Ref) error { - switch spec.(type) { - case *swspec.Schema: - case *swspec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - debugLog("updating ref for %s with %s", key, ref.String()) - pth, value, err := getPointerFromKey(spec, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *swspec.Schema: - refable.Ref = ref - case *swspec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case *swspec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case swspec.Schema: - debugLog("rewriting holder for %T", refable) - _, entry, pvalue, erp := getParentFromKey(spec, key) - if erp != nil { - return err - } - switch container := pvalue.(type) { - case swspec.Definitions: - container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case map[string]swspec.Schema: - container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case []swspec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", pth, err) - } - container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case *swspec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", pth, err) - } - container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - // NOTE: can't have case 
*swspec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled container type at %s: %T", key, value) - } - - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -// updateRefWithSchema replaces a ref with a schema (i.e. re-inline schema) -func updateRefWithSchema(spec *swspec.Swagger, key string, sch *swspec.Schema) error { - debugLog("updating ref for %s with schema", key) - pth, value, err := getPointerFromKey(spec, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *swspec.Schema: - *refable = *sch - case swspec.Schema: - _, entry, pvalue, erp := getParentFromKey(spec, key) - if erp != nil { - return err - } - switch container := pvalue.(type) { - case swspec.Definitions: - container[entry] = *sch - - case map[string]swspec.Schema: - container[entry] = *sch - - case []swspec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", pth, err) - } - container[idx] = *sch - - case *swspec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", pth, err) - } - container.Schemas[idx] = *sch - - // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value) - } - case *swspec.SchemaOrArray: - *refable.Schema = *sch - // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema - case *swspec.SchemaOrBool: - *refable.Schema = *sch - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -func containsString(names []string, name string) bool { - for _, nm := range names { - if nm == name { - return true - } - } - return false -} - -type opRef struct { - Method string - Path string - Key string - ID string - Op *swspec.Operation - Ref swspec.Ref -} - -type opRefs []opRef - -func (o opRefs) Len() int { return len(o) } -func (o opRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o opRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } - -func gatherOperations(specDoc *Spec, operationIDs []string) map[string]opRef { - var oprefs opRefs - - for method, pathItem := range specDoc.Operations() { - for pth, operation := range pathItem { - vv := *operation - oprefs = append(oprefs, opRef{ - Key: swag.ToGoName(strings.ToLower(method) + " " + pth), - Method: method, - Path: pth, - ID: vv.ID, - Op: &vv, - Ref: swspec.MustCreateRef("#" + slashpath.Join("/paths", jsonpointer.Escape(pth), method)), - }) - } - } - - sort.Sort(oprefs) - - operations := make(map[string]opRef) - for _, opr := range oprefs { - nm := opr.ID - if nm == "" { - nm = opr.Key - } - - oo, found := operations[nm] - if found && oo.Method != opr.Method && oo.Path != opr.Path { - nm = opr.Key - } - if len(operationIDs) == 0 || containsString(operationIDs, opr.ID) || containsString(operationIDs, nm) { - opr.ID = nm - opr.Op.ID = nm - operations[nm] = opr - } - } - return operations -} - -// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler. -// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. 
-func stripPointersAndOAIGen(opts *FlattenOpts) error { - // name all JSON pointers to anonymous documents - if err := namePointers(opts); err != nil { - return err - } - - // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) - hasIntroducedPointerOrInline, ers := stripOAIGen(opts) - if ers != nil { - return ers - } - - // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers - for hasIntroducedPointerOrInline { - if !opts.Minimal { - opts.Spec.reload() // re-analyze - if err := nameInlinedSchemas(opts); err != nil { - return err - } - } - - if err := namePointers(opts); err != nil { - return err - } - - // restrip - if hasIntroducedPointerOrInline, ers = stripOAIGen(opts); ers != nil { - return ers - } - - opts.Spec.reload() // re-analyze - } - return nil -} - -// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. -// -// A dedupe is deemed unnecessary whenever: -// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) -// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to -// the first parent. -// -// This function returns a true bool whenever it re-inlined a complex schema, so the caller may chose to iterate -// pointer and name resolution again. -func stripOAIGen(opts *FlattenOpts) (bool, error) { - debugLog("stripOAIGen") - replacedWithComplex := false - - // figure out referers of OAIGen definitions - for _, r := range opts.flattenContext.newRefs { - if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) - continue - } - for k, v := range opts.Spec.references.allRefs { - if r.path != v.String() { - continue - } - found := false - for _, p := range r.parents { - if p == k { - found = true - break - } - } - if !found { - r.parents = append(r.parents, k) - } - } - } - - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - //debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", - // k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) - if r.isOAIGen && len(r.parents) >= 1 { - pr := r.parents - sort.Strings(pr) - - // rewrite first parent schema in lexicographical order - debugLog("rewrite first parent in lex order %s with schema", pr[0]) - if err := updateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { - return false, err - } - if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { - // update parent in ref index entry - debugLog("update parent entry: %s", pr[0]) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - - // rewrite other parents to point to first parent - if len(pr) > 1 { - for _, p := range pr[1:] { - replacingRef := swspec.MustCreateRef(pr[0]) - - // set complex when replacing ref is an anonymous jsonpointer: further processing may be required - replacedWithComplex = replacedWithComplex || - slashpath.Dir(replacingRef.String()) != definitionsPath - debugLog("rewrite parent with ref: %s", replacingRef.String()) - - // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). - // Those are stripped later on. 
- if err := updateRef(opts.Swagger(), p, replacingRef); err != nil { - return false, err - } - - if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen { - // update parent in ref index - debugLog("update parent entry: %s", p) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - } - } - - // remove OAIGen definition - debugLog("removing definition %s", slashpath.Base(r.path)) - delete(opts.Swagger().Definitions, slashpath.Base(r.path)) - - // propagate changes in ref index for keys which have this one as a parent - for kk, value := range opts.flattenContext.newRefs { - if kk == k || !value.isOAIGen || value.resolved { - continue - } - found := false - newParents := make([]string, 0, len(value.parents)) - for _, parent := range value.parents { - switch { - case parent == r.path: - found = true - parent = pr[0] - case strings.HasPrefix(parent, r.path+"/"): - found = true - parent = slashpath.Join(pr[0], strings.TrimPrefix(parent, r.path)) - } - newParents = append(newParents, parent) - } - if found { - value.parents = newParents - } - } - - // mark naming conflict as resolved - debugLog("marking naming conflict resolved for key: %s", r.key) - opts.flattenContext.newRefs[r.key].isOAIGen = false - opts.flattenContext.newRefs[r.key].resolved = true - - // determine if the previous substitution did inline a complex schema - if r.schema != nil && r.schema.Ref.String() == "" { // inline schema - asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return false, err - } - debugLog("re-inlined schema: parent: %s, %t", pr[0], isAnalyzedAsComplex(asch)) - replacedWithComplex = replacedWithComplex || - !(slashpath.Dir(pr[0]) == definitionsPath) && isAnalyzedAsComplex(asch) - } - } - } - - debugLog("replacedWithComplex: %t", replacedWithComplex) - opts.Spec.reload() // re-analyze - return replacedWithComplex, nil -} - -// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting -// from flattening a spec -func croak(opts *FlattenOpts) { - reported := make(map[string]bool, len(opts.flattenContext.newRefs)) - for _, v := range opts.Spec.references.allRefs { - // warns about duplicate handling - for _, r := range opts.flattenContext.newRefs { - if r.isOAIGen && r.path == v.String() { - reported[r.newName] = true - } - } - } - for k := range reported { - log.Printf("warning: duplicate flattened definition name resolved as %s", k) - } - // warns about possible type mismatches - uniqueMsg := make(map[string]bool) - for _, msg := range opts.flattenContext.warnings { - if _, ok := uniqueMsg[msg]; ok { - continue - } - log.Printf("warning: %s", msg) - uniqueMsg[msg] = true - } -} - -// namePointers replaces all JSON pointers to anonymous documents by a $ref to a new named definitions. -// -// This is carried on depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself. -// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used). 
-func namePointers(opts *FlattenOpts) error { - debugLog("name pointers") - refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) - for k, ref := range opts.Spec.references.allRefs { - if slashpath.Dir(ref.String()) == definitionsPath { - // this a ref to a top-level definition: ok - continue - } - replacingRef, sch, erd := deepestRef(opts, ref) - if erd != nil { - return fmt.Errorf("at %s, %v", k, erd) - } - debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) - refsToReplace[k] = SchemaRef{ - Name: k, // caller - Ref: replacingRef, // callee - Schema: sch, - TopLevel: slashpath.Dir(replacingRef.String()) == definitionsPath, - } - } - depthFirst := sortDepthFirst(refsToReplace) - namer := &inlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: opRefsByRef(gatherOperations(opts.Spec, nil)), - flattenContext: opts.flattenContext, - opts: opts, - } - - for _, key := range depthFirst { - v := refsToReplace[key] - // update current replacement, which may have been updated by previous changes of deeper elements - replacingRef, sch, erd := deepestRef(opts, v.Ref) - if erd != nil { - return fmt.Errorf("at %s, %v", key, erd) - } - v.Ref = replacingRef - v.Schema = sch - v.TopLevel = slashpath.Dir(replacingRef.String()) == definitionsPath - debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) - - if v.TopLevel { - debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) - // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref - if err := updateRef(opts.Swagger(), key, v.Ref); err != nil { - return err - } - } else { - // this is a JSON pointer to an anonymous document (internal or external): - // create a definition for this schema when: - // - it is a complex schema - // - or it is pointed by more than one $ref (i.e. 
expresses commonality) - // otherwise, expand the pointer (single reference to a simple type) - // - // The named definition for this follows the target's key, not the caller's - debugLog("namePointers at %s for %s", key, v.Ref.String()) - - // qualify the expanded schema - /* - if key == "#/paths/~1some~1where~1{id}/get/parameters/1/items" { - // DEBUG - //func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) { - k, res, err := getPointerFromKey(namer.Spec, key) - debugLog("k = %s, res=%#v, err=%v", k, res, err) - } - */ - asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if ers != nil { - return fmt.Errorf("schema analysis [%s]: %v", key, ers) - } - callers := make([]string, 0, 64) - - debugLog("looking for callers") - an := New(opts.Swagger()) - for k, w := range an.references.allRefs { - r, _, erd := deepestRef(opts, w) - if erd != nil { - return fmt.Errorf("at %s, %v", key, erd) - } - if r.String() == v.Ref.String() { - callers = append(callers, k) - } - } - debugLog("callers for %s: %d", v.Ref.String(), len(callers)) - if len(callers) == 0 { - // has already been updated and resolved - continue - } - - parts := keyParts(v.Ref.String()) - debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) - // identifying edge case when the namer did nothing because we point to a non-schema object - // no definition is created and we expand the $ref for all callers - if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { - debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) - if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { - return err - } - - // regular case: we named the $ref as a definition, and we move all callers to this new $ref - for _, caller := range callers { - if caller != key { - // move $ref for next to resolve - debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) - c := refsToReplace[caller] - c.Ref = v.Ref - refsToReplace[caller] = c - } - } - } else { - debugLog("expand JSON pointer for key=%s", key) - if err := updateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { - return err - } - // NOTE: there is no other caller to update - } - } - } - opts.Spec.reload() // re-analyze - return nil -} - -// deepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. -// - if no definition is found, returns the deepest ref. -// - pointers to external files are expanded -// -// NOTE: all external $ref's are assumed to be already expanded at this stage. 
-func deepestRef(opts *FlattenOpts, ref swspec.Ref) (swspec.Ref, *swspec.Schema, error) { - if !ref.HasFragmentOnly { - // we found an external $ref, which is odd - // does nothing on external $refs - return ref, nil, nil - } - currentRef := ref - visited := make(map[string]bool, 64) -DOWNREF: - for currentRef.String() != "" { - if slashpath.Dir(currentRef.String()) == definitionsPath { - // this is a top-level definition: stop here and return this ref - return currentRef, nil, nil - } - if _, beenThere := visited[currentRef.String()]; beenThere { - return swspec.Ref{}, nil, - fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String()) - } - visited[currentRef.String()] = true - value, _, err := currentRef.GetPointer().Get(opts.Swagger()) - if err != nil { - return swspec.Ref{}, nil, err - } - switch refable := value.(type) { - case *swspec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case swspec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case *swspec.SchemaOrArray: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case *swspec.SchemaOrBool: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case swspec.Response: - // a pointer points to a schema initially marshalled in responses section... - // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema swspec.Schema - err := asSchema.UnmarshalJSON(asJSON) - if err != nil { - return swspec.Ref{}, nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) - - } - opts.flattenContext.warnings = append(opts.flattenContext.warnings, - fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - case swspec.Parameter: - // a pointer points to a schema initially marshalled in parameters section... - // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema swspec.Schema - err := asSchema.UnmarshalJSON(asJSON) - if err != nil { - return swspec.Ref{}, nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) - - } - opts.flattenContext.warnings = append(opts.flattenContext.warnings, - fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - default: - return swspec.Ref{}, nil, - fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T", - currentRef.String(), value) - - } - } - // assess what schema we're ending with - sch, erv := swspec.ResolveRefWithBase(opts.Swagger(), ¤tRef, opts.ExpandOpts(false)) - if erv != nil { - return swspec.Ref{}, nil, erv - } - if sch == nil { - return swspec.Ref{}, nil, fmt.Errorf("no schema found at %s", currentRef.String()) - } - return currentRef, sch, nil -} - -// normalizeRef strips the current file from any $ref. 
This works around issue go-openapi/spec#76: -// leading absolute file in $ref is stripped -func normalizeRef(opts *FlattenOpts) error { - debugLog("normalizeRef") - opts.Spec.reload() // re-analyze - for k, w := range opts.Spec.references.allRefs { - if strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS - // strip base path from definition - debugLog("stripping absolute path for: %s", w.String()) - if err := updateRef(opts.Swagger(), k, - swspec.MustCreateRef(slashpath.Join(definitionsPath, slashpath.Base(w.String())))); err != nil { - return err - } - } - } - opts.Spec.reload() // re-analyze - return nil -} diff --git a/vendor/github.com/go-openapi/analysis/go.mod b/vendor/github.com/go-openapi/analysis/go.mod deleted file mode 100644 index 6c8e5857..00000000 --- a/vendor/github.com/go-openapi/analysis/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/go-openapi/analysis - -require ( - github.com/go-openapi/jsonpointer v0.19.3 - github.com/go-openapi/loads v0.19.0 - github.com/go-openapi/spec v0.19.3 - github.com/go-openapi/strfmt v0.19.3 - github.com/go-openapi/swag v0.19.5 - github.com/stretchr/testify v1.3.0 - go.mongodb.org/mongo-driver v1.1.1 // indirect -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/analysis/go.sum b/vendor/github.com/go-openapi/analysis/go.sum deleted file mode 100644 index 8e8b5f9b..00000000 --- a/vendor/github.com/go-openapi/analysis/go.sum +++ /dev/null @@ -1,97 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer 
v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/loads v0.19.0 h1:wCOBNscACI8L93tt5tvB2zOMkJ098XCw3fP0BY2ybDA= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.2 h1:clPGfBnJohokno0e+d7hs6Yocrzjlgz6EsQSDncCRnE= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pborman/uuid 
v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/analysis/internal/post_go18.go b/vendor/github.com/go-openapi/analysis/internal/post_go18.go deleted file mode 100644 index f96f55c0..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/post_go18.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build go1.8 - -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import "net/url" - -// PathUnescape provides url.PathUnescape(), with seamless -// go version support for pre-go1.8 -// -// TODO: this function is currently defined in go-openapi/swag, -// but unexported. We might chose to export it, or simple phase -// out pre-go1.8 support. -func PathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/vendor/github.com/go-openapi/analysis/internal/pre_go18.go b/vendor/github.com/go-openapi/analysis/internal/pre_go18.go deleted file mode 100644 index 4cc64418..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/pre_go18.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.8 - -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import "net/url" - -// PathUnescape provides url.PathUnescape(), with seamless -// go version support for pre-go1.8 -// -// TODO: this function is currently defined in go-openapi/swag, -// but unexported. We might chose to export it, or simple phase -// out pre-go1.8 support. -func PathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go deleted file mode 100644 index 625c46f8..00000000 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ /dev/null @@ -1,425 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
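A side note on the internal pre/post go1.8 shims deleted just above: the only behavioural difference between the two PathUnescape variants is how '+' is treated, because the pre-1.8 fallback has to borrow url.QueryUnescape. A quick illustration with the standard library (a sketch, not part of the removed package):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The go1.8+ shim delegates to PathUnescape, the fallback to QueryUnescape.
	// They differ on '+', which only carries special meaning in query strings.
	p, _ := url.PathUnescape("a%2Fb+c")  // "a/b+c"
	q, _ := url.QueryUnescape("a%2Fb+c") // "a/b c"
	fmt.Println(p, q)
}
```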
- -package analysis - -import ( - "fmt" - "reflect" - - "github.com/go-openapi/spec" -) - -// Mixin modifies the primary swagger spec by adding the paths and -// definitions from the mixin specs. Top level parameters and -// responses from the mixins are also carried over. Operation id -// collisions are avoided by appending "Mixin" but only if -// needed. -// -// The following parts of primary are subject to merge, filling empty details -// - Info -// - BasePath -// - Host -// - ExternalDocs -// -// Consider calling FixEmptyResponseDescriptions() on the modified primary -// if you read them from storage and they are valid to start with. -// -// Entries in "paths", "definitions", "parameters" and "responses" are -// added to the primary in the order of the given mixins. If the entry -// already exists in primary it is skipped with a warning message. -// -// The count of skipped entries (from collisions) is returned so any -// deviation from the number expected can flag a warning in your build -// scripts. Carefully review the collisions before accepting them; -// consider renaming things if possible. -// -// No key normalization takes place (paths, type defs, -// etc). Ensure they are canonical if your downstream tools do -// key normalization of any form. -// -// Merging schemes (http, https), and consumers/producers do not account for -// collisions. -func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { - skipped := make([]string, 0, len(mixins)) - opIds := getOpIds(primary) - initPrimary(primary) - - for i, m := range mixins { - skipped = append(skipped, mergeSwaggerProps(primary, m)...) - - skipped = append(skipped, mergeConsumes(primary, m)...) - - skipped = append(skipped, mergeProduces(primary, m)...) - - skipped = append(skipped, mergeTags(primary, m)...) - - skipped = append(skipped, mergeSchemes(primary, m)...) - - skipped = append(skipped, mergeSecurityDefinitions(primary, m)...) - - skipped = append(skipped, mergeSecurityRequirements(primary, m)...) - - skipped = append(skipped, mergeDefinitions(primary, m)...) - - // merging paths requires a map of operationIDs to work with - skipped = append(skipped, mergePaths(primary, m, opIds, i)...) - - skipped = append(skipped, mergeParameters(primary, m)...) - - skipped = append(skipped, mergeResponses(primary, m)...) - } - return skipped -} - -// getOpIds extracts all the paths..operationIds from the given -// spec and returns them as the keys in a map with 'true' values. 
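mixin.go, deleted above, is the entry point most callers used directly: Mixin merges paths, definitions, parameters and responses from secondary specs into a primary one and returns a warning per skipped collision. A minimal usage sketch; the Pet/Error definitions are made up for illustration, and the spec.SwaggerProps literal assumes the struct layout of github.com/go-openapi/spec:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// Primary already defines "Pet"; the mixin tries to add "Pet" and "Error".
	primary := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Definitions: spec.Definitions{"Pet": spec.Schema{}},
	}}
	mixin := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Definitions: spec.Definitions{"Pet": spec.Schema{}, "Error": spec.Schema{}},
	}}

	// Mixin returns one warning string per skipped (colliding) entry.
	skipped := analysis.Mixin(primary, mixin)
	for _, warn := range skipped {
		fmt.Print(warn) // e.g. "definitions entry 'Pet' already exists in primary ..."
	}
	fmt.Println(len(primary.Definitions)) // 2: "Pet" kept from primary, "Error" merged in
}
```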
-func getOpIds(s *spec.Swagger) map[string]bool { - rv := make(map[string]bool) - if s.Paths == nil { - return rv - } - for _, v := range s.Paths.Paths { - piops := pathItemOps(v) - for _, op := range piops { - rv[op.ID] = true - } - } - return rv -} - -func pathItemOps(p spec.PathItem) []*spec.Operation { - var rv []*spec.Operation - rv = appendOp(rv, p.Get) - rv = appendOp(rv, p.Put) - rv = appendOp(rv, p.Post) - rv = appendOp(rv, p.Delete) - rv = appendOp(rv, p.Head) - rv = appendOp(rv, p.Patch) - return rv -} - -func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation { - if op == nil { - return ops - } - return append(ops, op) -} - -func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.SecurityDefinitions { - if _, exists := primary.SecurityDefinitions[k]; exists { - warn := fmt.Sprintf( - "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - continue - } - primary.SecurityDefinitions[k] = v - } - return -} - -func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Security { - found := false - for _, vv := range primary.Security { - if reflect.DeepEqual(v, vv) { - found = true - break - } - } - if found { - warn := fmt.Sprintf( - "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v) - skipped = append(skipped, warn) - continue - } - primary.Security = append(primary.Security, v) - } - return -} - -func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Definitions { - // assume name collisions represent IDENTICAL type. careful. - if _, exists := primary.Definitions[k]; exists { - warn := fmt.Sprintf( - "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - continue - } - primary.Definitions[k] = v - } - return -} - -func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) { - if m.Paths != nil { - for k, v := range m.Paths.Paths { - if _, exists := primary.Paths.Paths[k]; exists { - warn := fmt.Sprintf( - "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - continue - } - - // Swagger requires that operationIds be - // unique within a spec. If we find a - // collision we append "Mixin0" to the - // operatoinId we are adding, where 0 is mixin - // index. We assume that operationIds with - // all the proivded specs are already unique. - piops := pathItemOps(v) - for _, piop := range piops { - if opIds[piop.ID] { - piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) - } - opIds[piop.ID] = true - } - primary.Paths.Paths[k] = v - } - } - return -} - -func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Parameters { - // could try to rename on conflict but would - // have to fix $refs in the mixin. 
Complain - // for now - if _, exists := primary.Parameters[k]; exists { - warn := fmt.Sprintf( - "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - continue - } - primary.Parameters[k] = v - } - return -} - -func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Responses { - // could try to rename on conflict but would - // have to fix $refs in the mixin. Complain - // for now - if _, exists := primary.Responses[k]; exists { - warn := fmt.Sprintf( - "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - continue - } - primary.Responses[k] = v - } - return skipped -} - -func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Consumes { - found := false - for _, vv := range primary.Consumes { - if v == vv { - found = true - break - } - } - if found { - // no warning here: we just skip it - continue - } - primary.Consumes = append(primary.Consumes, v) - } - return []string{} -} - -func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Produces { - found := false - for _, vv := range primary.Produces { - if v == vv { - found = true - break - } - } - if found { - // no warning here: we just skip it - continue - } - primary.Produces = append(primary.Produces, v) - } - return []string{} -} - -func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Tags { - found := false - for _, vv := range primary.Tags { - if v.Name == vv.Name { - found = true - break - } - } - if found { - warn := fmt.Sprintf( - "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", v.Name) - skipped = append(skipped, warn) - continue - } - primary.Tags = append(primary.Tags, v) - } - return -} - -func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Schemes { - found := false - for _, vv := range primary.Schemes { - if v == vv { - found = true - break - } - } - if found { - // no warning here: we just skip it - continue - } - primary.Schemes = append(primary.Schemes, v) - } - return []string{} -} - -func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { - var skipped []string - primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) - - // merging details in swagger top properties - if primary.Host == "" { - primary.Host = m.Host - } - if primary.BasePath == "" { - primary.BasePath = m.BasePath - } - if primary.Info == nil { - primary.Info = m.Info - } else if m.Info != nil { - var sk []string - primary.Info.Extensions, sk = mergeExtensions(primary.Info.Extensions, m.Info.Extensions) - skipped = append(skipped, sk...) 
- if primary.Info.Description == "" { - primary.Info.Description = m.Info.Description - } - if primary.Info.Title == "" { - primary.Info.Description = m.Info.Description - } - if primary.Info.TermsOfService == "" { - primary.Info.TermsOfService = m.Info.TermsOfService - } - if primary.Info.Version == "" { - primary.Info.Version = m.Info.Version - } - - if primary.Info.Contact == nil { - primary.Info.Contact = m.Info.Contact - } else if m.Info.Contact != nil { - if primary.Info.Contact.Name == "" { - primary.Info.Contact.Name = m.Info.Contact.Name - } - if primary.Info.Contact.URL == "" { - primary.Info.Contact.URL = m.Info.Contact.URL - } - if primary.Info.Contact.Email == "" { - primary.Info.Contact.Email = m.Info.Contact.Email - } - } - - if primary.Info.License == nil { - primary.Info.License = m.Info.License - } else if m.Info.License != nil { - if primary.Info.License.Name == "" { - primary.Info.License.Name = m.Info.License.Name - } - if primary.Info.License.URL == "" { - primary.Info.License.URL = m.Info.License.URL - } - } - - } - if primary.ExternalDocs == nil { - primary.ExternalDocs = m.ExternalDocs - } else if m.ExternalDocs != nil { - if primary.ExternalDocs.Description == "" { - primary.ExternalDocs.Description = m.ExternalDocs.Description - } - if primary.ExternalDocs.URL == "" { - primary.ExternalDocs.URL = m.ExternalDocs.URL - } - } - return skipped -} - -func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) { - if primary == nil { - result = m - return - } - if m == nil { - result = primary - return - } - result = primary - for k, v := range m { - if _, found := primary[k]; found { - skipped = append(skipped, k) - continue - } - primary[k] = v - } - return -} - -func initPrimary(primary *spec.Swagger) { - if primary.SecurityDefinitions == nil { - primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme) - } - if primary.Security == nil { - primary.Security = make([]map[string][]string, 0, 10) - } - if primary.Produces == nil { - primary.Produces = make([]string, 0, 10) - } - if primary.Consumes == nil { - primary.Consumes = make([]string, 0, 10) - } - if primary.Tags == nil { - primary.Tags = make([]spec.Tag, 0, 10) - } - if primary.Schemes == nil { - primary.Schemes = make([]string, 0, 10) - } - if primary.Paths == nil { - primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)} - } - if primary.Paths.Paths == nil { - primary.Paths.Paths = make(map[string]spec.PathItem) - } - if primary.Definitions == nil { - primary.Definitions = make(spec.Definitions) - } - if primary.Parameters == nil { - primary.Parameters = make(map[string]spec.Parameter) - } - if primary.Responses == nil { - primary.Responses = make(map[string]spec.Response) - } -} diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go deleted file mode 100644 index 398c7806..00000000 --- a/vendor/github.com/go-openapi/analysis/schema.go +++ /dev/null @@ -1,234 +0,0 @@ -package analysis - -import ( - "fmt" - - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" -) - -// SchemaOpts configures the schema analyzer -type SchemaOpts struct { - Schema *spec.Schema - Root interface{} - BasePath string - _ struct{} -} - -// Schema analysis, will classify the schema according to known -// patterns. 
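schema.go, whose deletion starts just above, classifies a schema (map, array, tuple, known scalar, enum, base type) so that the flattener can decide whether to inline or name it. A small hedged sketch of calling the analyzer; MapProperty and StringProperty are assumed helper constructors from go-openapi/spec, not part of this file:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// Equivalent to {"type": "object", "additionalProperties": {"type": "string"}}.
	sch := spec.MapProperty(spec.StringProperty())

	asch, err := analysis.Schema(analysis.SchemaOpts{Schema: sch})
	if err != nil {
		panic(err)
	}
	// A string-valued map counts as a simple map, hence a simple schema.
	fmt.Println(asch.IsMap, asch.IsSimpleMap, asch.IsSimpleSchema) // expected: true true true
}
```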
-func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { - if opts.Schema == nil { - return nil, fmt.Errorf("no schema to analyze") - } - - a := &AnalyzedSchema{ - schema: opts.Schema, - root: opts.Root, - basePath: opts.BasePath, - } - - a.initializeFlags() - a.inferKnownType() - a.inferEnum() - a.inferBaseType() - - if err := a.inferMap(); err != nil { - return nil, err - } - if err := a.inferArray(); err != nil { - return nil, err - } - - a.inferTuple() - - if err := a.inferFromRef(); err != nil { - return nil, err - } - - a.inferSimpleSchema() - return a, nil -} - -// AnalyzedSchema indicates what the schema represents -type AnalyzedSchema struct { - schema *spec.Schema - root interface{} - basePath string - - hasProps bool - hasAllOf bool - hasItems bool - hasAdditionalProps bool - hasAdditionalItems bool - hasRef bool - - IsKnownType bool - IsSimpleSchema bool - IsArray bool - IsSimpleArray bool - IsMap bool - IsSimpleMap bool - IsExtendedObject bool - IsTuple bool - IsTupleWithExtra bool - IsBaseType bool - IsEnum bool -} - -// Inherits copies value fields from other onto this schema -func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { - if other == nil { - return - } - a.hasProps = other.hasProps - a.hasAllOf = other.hasAllOf - a.hasItems = other.hasItems - a.hasAdditionalItems = other.hasAdditionalItems - a.hasAdditionalProps = other.hasAdditionalProps - a.hasRef = other.hasRef - - a.IsKnownType = other.IsKnownType - a.IsSimpleSchema = other.IsSimpleSchema - a.IsArray = other.IsArray - a.IsSimpleArray = other.IsSimpleArray - a.IsMap = other.IsMap - a.IsSimpleMap = other.IsSimpleMap - a.IsExtendedObject = other.IsExtendedObject - a.IsTuple = other.IsTuple - a.IsTupleWithExtra = other.IsTupleWithExtra - a.IsBaseType = other.IsBaseType - a.IsEnum = other.IsEnum -} - -func (a *AnalyzedSchema) inferFromRef() error { - if a.hasRef { - sch := new(spec.Schema) - sch.Ref = a.schema.Ref - err := spec.ExpandSchema(sch, a.root, nil) - if err != nil { - return err - } - rsch, err := Schema(SchemaOpts{ - Schema: sch, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - // NOTE(fredbi): currently the only cause for errors is - // unresolved ref. Since spec.ExpandSchema() expands the - // schema recursively, there is no chance to get there, - // until we add more causes for error in this schema analysis. 
- return err - } - a.inherits(rsch) - } - return nil -} - -func (a *AnalyzedSchema) inferSimpleSchema() { - a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap -} - -func (a *AnalyzedSchema) inferKnownType() { - tpe := a.schema.Type - format := a.schema.Format - a.IsKnownType = tpe.Contains("boolean") || - tpe.Contains("integer") || - tpe.Contains("number") || - tpe.Contains("string") || - (format != "" && strfmt.Default.ContainsName(format)) || - (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) -} - -func (a *AnalyzedSchema) inferMap() error { - if a.isObjectType() { - hasExtra := a.hasProps || a.hasAllOf - a.IsMap = a.hasAdditionalProps && !hasExtra - a.IsExtendedObject = a.hasAdditionalProps && hasExtra - if a.IsMap { - if a.schema.AdditionalProperties.Schema != nil { - msch, err := Schema(SchemaOpts{ - Schema: a.schema.AdditionalProperties.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - a.IsSimpleMap = msch.IsSimpleSchema - } else if a.schema.AdditionalProperties.Allows { - a.IsSimpleMap = true - } - } - } - return nil -} - -func (a *AnalyzedSchema) inferArray() error { - // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple - // (yes, even if the Items array contains only one element). - // arrays in JSON schema may be unrestricted (i.e no Items specified). - // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. - // - // NOTE: the spec package misses the distinction between: - // items: [] and items: {}, so we consider both arrays here. - a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) - if a.IsArray && a.hasItems { - if a.schema.Items.Schema != nil { - itsch, err := Schema(SchemaOpts{ - Schema: a.schema.Items.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - a.IsSimpleArray = itsch.IsSimpleSchema - } - } - if a.IsArray && !a.hasItems { - a.IsSimpleArray = true - } - return nil -} - -func (a *AnalyzedSchema) inferTuple() { - tuple := a.hasItems && a.schema.Items.Schemas != nil - a.IsTuple = tuple && !a.hasAdditionalItems - a.IsTupleWithExtra = tuple && a.hasAdditionalItems -} - -func (a *AnalyzedSchema) inferBaseType() { - if a.isObjectType() { - a.IsBaseType = a.schema.Discriminator != "" - } -} - -func (a *AnalyzedSchema) inferEnum() { - a.IsEnum = len(a.schema.Enum) > 0 -} - -func (a *AnalyzedSchema) initializeFlags() { - a.hasProps = len(a.schema.Properties) > 0 - a.hasAllOf = len(a.schema.AllOf) > 0 - a.hasRef = a.schema.Ref.String() != "" - - a.hasItems = a.schema.Items != nil && - (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) - - a.hasAdditionalProps = a.schema.AdditionalProperties != nil && - (a.schema.AdditionalProperties != nil || a.schema.AdditionalProperties.Allows) - - a.hasAdditionalItems = a.schema.AdditionalItems != nil && - (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) - -} - -func (a *AnalyzedSchema) isObjectType() bool { - return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) -} - -func (a *AnalyzedSchema) isArrayType() bool { - return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) -} diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore deleted file mode 100644 index dd91ed6a..00000000 --- 
a/vendor/github.com/go-openapi/errors/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml deleted file mode 100644 index 6badaf15..00000000 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ /dev/null @@ -1,20 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals diff --git a/vendor/github.com/go-openapi/errors/.travis.yml b/vendor/github.com/go-openapi/errors/.travis.yml deleted file mode 100644 index ba8a6d59..00000000 --- a/vendor/github.com/go-openapi/errors/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: gZGp9NaHxi7zawlXJXKY92BGeDR1x0tbIcTyU5nMKLq0fhIaiEBJEeALwZ4VgqsSv3DytSSF5mLH8fevAM3ixE6hxjKQ+lQuf7V/w3btCN1CSWgoua5LOh1kTnqZQtJuRvO4pzoJcT3bJWBsVZ07VGNVzzJEy/zAKCHFqBUCXShw7QemlLBcYWFNqveTlvDIfCzvouoLnPoXwxEpkjxe9uz/ZKZgAnup/fXjC8RFctmgCnkCyvJTk0Y/fZCsufixJrJhshBWTnlrFCzRmgNkz2d+i1Ls3+MJ5EJJ2Tx/A5S63dL49J1f9Kr0AKHADmulSy8JNzIckKwbyFMYUecrsW+Lsu9DhnVMy1jj5pKsJDLRi2iIU3fXTMWbcyQbXjbbnBO2mPdP3Tzme75y4D9fc8hUPeyqVv2BU26NEbQ7EF2pKJ93OXvci7HlwRBgdJa8j6mP2LEDClcPQW00g7N/OZe0cTOMa8L5AwiBlbArwqt9wv6YLJoTG0wpDhzWsFvbCg5bJxe28Yn3fIDD0Lk1I7iSnBbp/5gzF19jmxqvcT8tHRkDL4xfjbENFTZjA5uB4Z4pj4WSyWQILLV/Jwhe3fi9uQwdviFHfj5pnVrmNUiGSOQL672K5wl2c3E9mGwejvsu2dfEz28n7Y/FUnOpY3/cBS0n27JJaerS0zMKNLE= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/errors/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md deleted file mode 100644 index 0ce50b23..00000000 --- a/vendor/github.com/go-openapi/errors/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# OpenAPI errors [![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/errors?status.svg)](http://godoc.org/github.com/go-openapi/errors) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) - -Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go deleted file mode 100644 index 7667cee7..00000000 --- a/vendor/github.com/go-openapi/errors/api.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" - "reflect" - "strings" -) - -// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
-var DefaultHTTPCode = http.StatusUnprocessableEntity - -// Error represents a error interface all swagger framework errors implement -type Error interface { - error - Code() int32 -} - -type apiError struct { - code int32 - message string -} - -func (a *apiError) Error() string { - return a.message -} - -func (a *apiError) Code() int32 { - return a.code -} - -// New creates a new API error with a code and a message -func New(code int32, message string, args ...interface{}) Error { - if len(args) > 0 { - return &apiError{code, fmt.Sprintf(message, args...)} - } - return &apiError{code, message} -} - -// NotFound creates a new not found error -func NotFound(message string, args ...interface{}) Error { - if message == "" { - message = "Not found" - } - return New(http.StatusNotFound, fmt.Sprintf(message, args...)) -} - -// NotImplemented creates a new not implemented error -func NotImplemented(message string) Error { - return New(http.StatusNotImplemented, message) -} - -// MethodNotAllowedError represents an error for when the path matches but the method doesn't -type MethodNotAllowedError struct { - code int32 - Allowed []string - message string -} - -func (m *MethodNotAllowedError) Error() string { - return m.message -} - -// Code the error code -func (m *MethodNotAllowedError) Code() int32 { - return m.code -} - -func errorAsJSON(err Error) []byte { - b, _ := json.Marshal(struct { - Code int32 `json:"code"` - Message string `json:"message"` - }{err.Code(), err.Error()}) - return b -} - -func flattenComposite(errs *CompositeError) *CompositeError { - var res []error - for _, er := range errs.Errors { - switch e := er.(type) { - case *CompositeError: - if len(e.Errors) > 0 { - flat := flattenComposite(e) - if len(flat.Errors) > 0 { - res = append(res, flat.Errors...) - } - } - default: - if e != nil { - res = append(res, e) - } - } - } - return CompositeValidationError(res...) 
-} - -// MethodNotAllowed creates a new method not allowed error -func MethodNotAllowed(requested string, allow []string) Error { - msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) - return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} -} - -// ServeError the error handler interface implementation -func ServeError(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Set("Content-Type", "application/json") - switch e := err.(type) { - case *CompositeError: - er := flattenComposite(e) - // strips composite errors to first element only - if len(er.Errors) > 0 { - ServeError(rw, r, er.Errors[0]) - } else { - // guard against empty CompositeError (invalid construct) - ServeError(rw, r, nil) - } - case *MethodNotAllowedError: - rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ",")) - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case Error: - value := reflect.ValueOf(e) - if value.Kind() == reflect.Ptr && value.IsNil() { - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - return - } - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case nil: - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - default: - rw.WriteHeader(http.StatusInternalServerError) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) - } - } -} - -func asHTTPCode(input int) int { - if input >= 600 { - return DefaultHTTPCode - } - return input -} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go deleted file mode 100644 index 0545b501..00000000 --- a/vendor/github.com/go-openapi/errors/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import "net/http" - -// Unauthenticated returns an unauthenticated error -func Unauthenticated(scheme string) Error { - return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) -} diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go deleted file mode 100644 index 963d4274..00000000 --- a/vendor/github.com/go-openapi/errors/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* - -Package errors provides an Error interface and several concrete types -implementing this interface to manage API errors and JSON-schema validation -errors. - -A middleware handler ServeError() is provided to serve the errors types -it defines. - -It is used throughout the various go-openapi toolkit libraries -(https://github.com/go-openapi). - -*/ -package errors diff --git a/vendor/github.com/go-openapi/errors/go.mod b/vendor/github.com/go-openapi/errors/go.mod deleted file mode 100644 index 08414300..00000000 --- a/vendor/github.com/go-openapi/errors/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -module github.com/go-openapi/errors - -require ( - github.com/stretchr/objx v0.2.0 // indirect - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/go-openapi/errors/go.sum b/vendor/github.com/go-openapi/errors/go.sum deleted file mode 100644 index e7314e27..00000000 --- a/vendor/github.com/go-openapi/errors/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go deleted file mode 100644 index 0360c094..00000000 --- a/vendor/github.com/go-openapi/errors/headers.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
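doc.go above sums the package up: typed API errors plus a ServeError handler that writes them as JSON with the matching status code. A hedged sketch of the usual wiring, using only helpers visible in the deleted api.go:

```go
package main

import (
	"net/http"

	"github.com/go-openapi/errors"
)

func petHandler(rw http.ResponseWriter, r *http.Request) {
	// Typed errors carry an HTTP status code alongside the message.
	err := errors.NotFound("no pet with id %q", r.URL.Query().Get("id"))

	// ServeError writes {"code":404,"message":"..."} and sets the status code.
	errors.ServeError(rw, r, err)
}

func main() {
	http.HandleFunc("/pets", petHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```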
- -package errors - -import ( - "fmt" - "net/http" -) - -// Validation represents a failure of a precondition -type Validation struct { - code int32 - Name string - In string - Value interface{} - message string - Values []interface{} -} - -func (e *Validation) Error() string { - return e.message -} - -// Code the error code -func (e *Validation) Code() int32 { - return e.code -} - -// ValidateName produces an error message name for an aliased property -func (e *Validation) ValidateName(name string) *Validation { - if e.Name == "" && name != "" { - e.Name = name - e.message = name + e.message - } - return e -} - -const ( - contentTypeFail = `unsupported media type %q, only %v are allowed` - responseFormatFail = `unsupported media type requested, only %v are available` -) - -// InvalidContentType error for an invalid content type -func InvalidContentType(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusUnsupportedMediaType, - Name: "Content-Type", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(contentTypeFail, value, allowed), - } -} - -// InvalidResponseFormat error for an unacceptable response format request -func InvalidResponseFormat(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusNotAcceptable, - Name: "Accept", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(responseFormatFail, allowed), - } -} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go deleted file mode 100644 index 6390d463..00000000 --- a/vendor/github.com/go-openapi/errors/middleware.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package errors - -import ( - "bytes" - "fmt" - "strings" -) - -// APIVerificationFailed is an error that contains all the missing info for a mismatched section -// between the api registrations and the api spec -type APIVerificationFailed struct { - Section string - MissingSpecification []string - MissingRegistration []string -} - -// -func (v *APIVerificationFailed) Error() string { - buf := bytes.NewBuffer(nil) - - hasRegMissing := len(v.MissingRegistration) > 0 - hasSpecMissing := len(v.MissingSpecification) > 0 - - if hasRegMissing { - buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) - } - - if hasRegMissing && hasSpecMissing { - buf.WriteString("\n") - } - - if hasSpecMissing { - buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) - } - - return buf.String() -} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go deleted file mode 100644 index 1bae8730..00000000 --- a/vendor/github.com/go-openapi/errors/parsing.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import "fmt" - -// ParseError respresents a parsing error -type ParseError struct { - code int32 - Name string - In string - Value string - Reason error - message string -} - -func (e *ParseError) Error() string { - return e.message -} - -// Code returns the http status code for this error -func (e *ParseError) Code() int32 { - return e.code -} - -const ( - parseErrorTemplContent = `parsing %s %s from %q failed, because %s` - parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` -) - -// NewParseError creates a new parse error -func NewParseError(name, in, value string, reason error) *ParseError { - var msg string - if in == "" { - msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) - } else { - msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) - } - return &ParseError{ - code: 400, - Name: name, - In: in, - Value: value, - Reason: reason, - message: msg, - } -} diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go deleted file mode 100644 index 14fb2c5f..00000000 --- a/vendor/github.com/go-openapi/errors/schema.go +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "fmt" - "strings" -) - -const ( - invalidType = "%s is an invalid type name" - typeFail = "%s in %s must be of type %s" - typeFailWithData = "%s in %s must be of type %s: %q" - typeFailWithError = "%s in %s must be of type %s, because: %s" - requiredFail = "%s in %s is required" - tooLongMessage = "%s in %s should be at most %d chars long" - tooShortMessage = "%s in %s should be at least %d chars long" - patternFail = "%s in %s should match '%s'" - enumFail = "%s in %s should be one of %v" - multipleOfFail = "%s in %s should be a multiple of %v" - maxIncFail = "%s in %s should be less than or equal to %v" - maxExcFail = "%s in %s should be less than %v" - minIncFail = "%s in %s should be greater than or equal to %v" - minExcFail = "%s in %s should be greater than %v" - uniqueFail = "%s in %s shouldn't contain duplicates" - maxItemsFail = "%s in %s should have at most %d items" - minItemsFail = "%s in %s should have at least %d items" - typeFailNoIn = "%s must be of type %s" - typeFailWithDataNoIn = "%s must be of type %s: %q" - typeFailWithErrorNoIn = "%s must be of type %s, because: %s" - requiredFailNoIn = "%s is required" - tooLongMessageNoIn = "%s should be at most %d chars long" - tooShortMessageNoIn = "%s should be at least %d chars long" - patternFailNoIn = "%s should match '%s'" - enumFailNoIn = "%s should be one of %v" - multipleOfFailNoIn = "%s should be a multiple of %v" - maxIncFailNoIn = "%s should be less than or equal to %v" - maxExcFailNoIn = "%s should be less than %v" - minIncFailNoIn = "%s should be greater than or equal to %v" - minExcFailNoIn = "%s should be greater than %v" - uniqueFailNoIn = "%s shouldn't contain duplicates" - maxItemsFailNoIn = "%s should have at most %d items" - minItemsFailNoIn = "%s should have at least %d items" - noAdditionalItems = "%s in %s can't have additional items" - noAdditionalItemsNoIn = "%s can't have additional items" - tooFewProperties = "%s in %s should have at least %d properties" - tooFewPropertiesNoIn = "%s should have at least %d properties" - tooManyProperties = "%s in %s should have at most %d properties" - tooManyPropertiesNoIn = "%s should have at most %d properties" - unallowedProperty = "%s.%s in %s is a forbidden property" - unallowedPropertyNoIn = "%s.%s is a forbidden property" - failedAllPatternProps = "%s.%s in %s failed all pattern properties" - failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" - multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" -) - -// All code responses can be used to differentiate errors for different handling -// by the consuming program -const ( - // CompositeErrorCode remains 422 for backwards-compatibility - // and to separate it from validation errors with cause - CompositeErrorCode = 422 - // InvalidTypeCode is used for any subclass of invalid types - InvalidTypeCode = 600 + iota - RequiredFailCode - TooLongFailCode - TooShortFailCode - PatternFailCode - EnumFailCode - MultipleOfFailCode - MaxFailCode - MinFailCode - UniqueFailCode - MaxItemsFailCode - MinItemsFailCode - NoAdditionalItemsCode - TooFewPropertiesCode - TooManyPropertiesCode - UnallowedPropertyCode - FailedAllPatternPropsCode - MultipleOfMustBePositiveCode -) - -// CompositeError is an error that groups several errors together -type CompositeError struct { - Errors []error - code int32 - message string -} - -// Code for this error -func (c 
*CompositeError) Code() int32 { - return c.code -} - -func (c *CompositeError) Error() string { - if len(c.Errors) > 0 { - msgs := []string{c.message + ":"} - for _, e := range c.Errors { - msgs = append(msgs, e.Error()) - } - return strings.Join(msgs, "\n") - } - return c.message -} - -// CompositeValidationError an error to wrap a bunch of other errors -func CompositeValidationError(errors ...error) *CompositeError { - return &CompositeError{ - code: CompositeErrorCode, - Errors: append([]error{}, errors...), - message: "validation failure list", - } -} - -// FailedAllPatternProperties an error for when the property doesn't match a pattern -func FailedAllPatternProperties(name, in, key string) *Validation { - msg := fmt.Sprintf(failedAllPatternProps, name, key, in) - if in == "" { - msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key) - } - return &Validation{ - code: FailedAllPatternPropsCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// PropertyNotAllowed an error for when the property doesn't match a pattern -func PropertyNotAllowed(name, in, key string) *Validation { - msg := fmt.Sprintf(unallowedProperty, name, key, in) - if in == "" { - msg = fmt.Sprintf(unallowedPropertyNoIn, name, key) - } - return &Validation{ - code: UnallowedPropertyCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// TooFewProperties an error for an object with too few properties -func TooFewProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooFewProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n) - } - return &Validation{ - code: TooFewPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// TooManyProperties an error for an object with too many properties -func TooManyProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooManyProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n) - } - return &Validation{ - code: TooManyPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// AdditionalItemsNotAllowed an error for invalid additional items -func AdditionalItemsNotAllowed(name, in string) *Validation { - msg := fmt.Sprintf(noAdditionalItems, name, in) - if in == "" { - msg = fmt.Sprintf(noAdditionalItemsNoIn, name) - } - return &Validation{ - code: NoAdditionalItemsCode, - Name: name, - In: in, - message: msg, - } -} - -// InvalidCollectionFormat another flavor of invalid type error -func InvalidCollectionFormat(name, in, format string) *Validation { - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: format, - message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name), - } -} - -// InvalidTypeName an error for when the type is invalid -func InvalidTypeName(typeName string) *Validation { - return &Validation{ - code: InvalidTypeCode, - Value: typeName, - message: fmt.Sprintf(invalidType, typeName), - } -} - -// InvalidType creates an error for when the type is invalid -func InvalidType(name, in, typeName string, value interface{}) *Validation { - var message string - - if in != "" { - switch value.(type) { - case string: - message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) - default: - message = fmt.Sprintf(typeFail, name, in, typeName) - } - } else { - switch value.(type) { - case string: - message = 
fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) - default: - message = fmt.Sprintf(typeFailNoIn, name, typeName) - } - } - - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: value, - message: message, - } - -} - -// DuplicateItems error for when an array contains duplicates -func DuplicateItems(name, in string) *Validation { - msg := fmt.Sprintf(uniqueFail, name, in) - if in == "" { - msg = fmt.Sprintf(uniqueFailNoIn, name) - } - return &Validation{ - code: UniqueFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooManyItems error for when an array contains too many items -func TooManyItems(name, in string, max int64) *Validation { - msg := fmt.Sprintf(maxItemsFail, name, in, max) - if in == "" { - msg = fmt.Sprintf(maxItemsFailNoIn, name, max) - } - - return &Validation{ - code: MaxItemsFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooFewItems error for when an array contains too few items -func TooFewItems(name, in string, min int64) *Validation { - msg := fmt.Sprintf(minItemsFail, name, in, min) - if in == "" { - msg = fmt.Sprintf(minItemsFailNoIn, name, min) - } - return &Validation{ - code: MinItemsFailCode, - Name: name, - In: in, - message: msg, - } -} - -// ExceedsMaximumInt error for when maxinum validation fails -func ExceedsMaximumInt(name, in string, max int64, exclusive bool) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: max, - message: message, - } -} - -// ExceedsMaximumUint error for when maxinum validation fails -func ExceedsMaximumUint(name, in string, max uint64, exclusive bool) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: max, - message: message, - } -} - -// ExceedsMaximum error for when maxinum validation fails -func ExceedsMaximum(name, in string, max float64, exclusive bool) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: max, - message: message, - } -} - -// ExceedsMinimumInt error for when maxinum validation fails -func ExceedsMinimumInt(name, in string, min int64, exclusive bool) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: min, - message: message, - } -} - -// ExceedsMinimumUint error for when maxinum validation fails -func ExceedsMinimumUint(name, in string, min uint64, exclusive bool) *Validation { - var message string - if in == "" { - m := 
minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: min, - message: message, - } -} - -// ExceedsMinimum error for when maxinum validation fails -func ExceedsMinimum(name, in string, min float64, exclusive bool) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: min, - message: message, - } -} - -// NotMultipleOf error for when multiple of validation fails -func NotMultipleOf(name, in string, multiple interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) - } else { - msg = fmt.Sprintf(multipleOfFail, name, in, multiple) - } - return &Validation{ - code: MultipleOfFailCode, - Name: name, - In: in, - Value: multiple, - message: msg, - } -} - -// EnumFail error for when an enum validation fails -func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(enumFailNoIn, name, values) - } else { - msg = fmt.Sprintf(enumFail, name, in, values) - } - - return &Validation{ - code: EnumFailCode, - Name: name, - In: in, - Value: value, - Values: values, - message: msg, - } -} - -// Required error for when a value is missing -func Required(name, in string) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(requiredFailNoIn, name) - } else { - msg = fmt.Sprintf(requiredFail, name, in) - } - return &Validation{ - code: RequiredFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooLong error for when a string is too long -func TooLong(name, in string, max int64) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooLongMessageNoIn, name, max) - } else { - msg = fmt.Sprintf(tooLongMessage, name, in, max) - } - return &Validation{ - code: TooLongFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooShort error for when a string is too short -func TooShort(name, in string, min int64) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooShortMessageNoIn, name, min) - } else { - msg = fmt.Sprintf(tooShortMessage, name, in, min) - } - - return &Validation{ - code: TooShortFailCode, - Name: name, - In: in, - message: msg, - } -} - -// FailedPattern error for when a string fails a regex pattern match -// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. 
-func FailedPattern(name, in, pattern string) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(patternFailNoIn, name, pattern) - } else { - msg = fmt.Sprintf(patternFail, name, in, pattern) - } - - return &Validation{ - code: PatternFailCode, - Name: name, - In: in, - message: msg, - } -} - -// MultipleOfMustBePositive error for when a -// multipleOf factor is negative -func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { - return &Validation{ - code: MultipleOfMustBePositiveCode, - Name: name, - In: in, - Value: factor, - message: fmt.Sprintf(multipleOfMustBePositive, name, factor), - } -} diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore deleted file mode 100644 index 769c2440..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 9aef9184..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
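For reference, a minimal sketch of how the go-openapi/errors helpers removed above fit together, assuming the package is now consumed from its upstream module `github.com/go-openapi/errors` rather than the deleted vendor copy (the handler path and field values here are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	apierrors "github.com/go-openapi/errors"
)

func main() {
	// Individual validation failures built with the package's constructors.
	missingName := apierrors.Required("name", "body")          // "name in body is required"
	shortPassword := apierrors.TooShort("password", "body", 8) // "password in body should be at least 8 chars long"

	// CompositeValidationError groups them under the 422 composite code.
	composite := apierrors.CompositeValidationError(missingName, shortPassword)
	fmt.Println(composite.Error())

	// ServeError writes a JSON error body; for a composite it flattens the
	// list and responds with the first contained error only, mapping any
	// internal code >= 600 to the package's DefaultHTTPCode.
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodPost, "/widgets", nil)
	apierrors.ServeError(rec, req, composite)

	fmt.Println(rec.Code, rec.Body.String())
}
```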
diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md deleted file mode 100644 index 813788af..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) -An implementation of JSON Pointer - Go language - -## Status -Completed YES - -Tested YES - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. 
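The README above belongs to the deleted vendored copy of go-openapi/jsonpointer; a short sketch of its lookup API (New, Get and Escape, as defined in pointer.go further down this diff), again assuming the upstream module `github.com/go-openapi/jsonpointer` is used instead of the vendor copy:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	document := map[string]interface{}{
		"tags": []interface{}{"alpha", "beta"},
		"a/b":  "slash in key",
	}

	// "/tags/1" resolves the map key "tags", then index 1 of the slice.
	p, err := jsonpointer.New("/tags/1")
	if err != nil {
		log.Fatal(err)
	}
	value, kind, err := p.Get(document)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(value, kind) // beta string

	// Keys containing "/" or "~" must be escaped as ~1 and ~0 respectively.
	q, err := jsonpointer.New("/" + jsonpointer.Escape("a/b"))
	if err != nil {
		log.Fatal(err)
	}
	v, _, err := q.Get(document)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // slash in key
}
```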
diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod deleted file mode 100644 index 3e45e225..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/go-openapi/jsonpointer - -require ( - github.com/go-openapi/swag v0.19.5 - github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect - github.com/stretchr/testify v1.3.0 -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum deleted file mode 100644 index 953d4f35..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/go.sum +++ /dev/null @@ -1,24 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go deleted file mode 100644 index b284eb77..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 - -package jsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/go-openapi/swag" -) - -const ( - emptyPointer = `` - pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator -) - -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() -var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() - -// JSONPointable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONPointable interface { - JSONLookup(string) (interface{}, error) -} - -// JSONSetable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONSetable interface { - JSONSet(string, interface{}) error -} - -// New creates a new json pointer for the given string -func New(jsonPointerString string) (Pointer, error) { - - var p Pointer - err := p.parse(jsonPointerString) - return p, err - -} - -// Pointer the json pointer reprsentation -type Pointer struct { - referenceTokens []string -} - -// "Constructor", parses the given string JSON pointer -func (p *Pointer) parse(jsonPointerString string) error { - - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } - } - } - - return err -} - -// Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - return p.get(document, swag.DefaultJSONNameProvider) -} - -// Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { - return document, p.set(document, value, swag.DefaultJSONNameProvider) -} - -// GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { - return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) -} - -// SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { - return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) -} - -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - - switch kind { - - case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := 
node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - } - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - return fld.Interface(), kind, nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if mv.IsValid() { - return mv.Interface(), kind, nil - } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return nil, kind, err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - return elem.Interface(), kind, nil - - default: - return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { - rValue := reflect.Indirect(reflect.ValueOf(node)) - switch rValue.Kind() { - - case reflect.Struct: - if ns, ok := node.(JSONSetable); ok { // pointer impl - return ns.JSONSet(decodedToken, data) - } - - if rValue.Type().Implements(jsonSetableType) { - return node.(JSONSetable).JSONSet(decodedToken, data) - } - - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.IsValid() { - fld.Set(reflect.ValueOf(data)) - } - return nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - rValue.SetMapIndex(kv, reflect.ValueOf(data)) - return nil - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if !elem.CanSet() { - return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) - } - elem.Set(reflect.ValueOf(data)) - return nil - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - return node, kind, nil - } - - for _, token := range p.referenceTokens { - - decodedToken := Unescape(token) - - r, knd, err := getSingleImpl(node, decodedToken, nameProvider) - if err != nil { - return nil, knd, err - } - node, kind = r, knd - - } - - rValue := reflect.ValueOf(node) - kind = rValue.Kind() - - return node, kind, nil -} - -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { - knd := reflect.ValueOf(node).Kind() - - if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") - } - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - // Full document when empty - if len(p.referenceTokens) == 0 { - return nil - } - - lastI := len(p.referenceTokens) - 1 - for i, 
token := range p.referenceTokens { - isLastToken := i == lastI - decodedToken := Unescape(token) - - if isLastToken { - - return setSingleImpl(node, data, decodedToken, nameProvider) - } - - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - - switch kind { - - case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return err - } - fld := reflect.ValueOf(r) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = r - continue - } - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = fld.Interface() - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if !mv.IsValid() { - return fmt.Errorf("object has no key %q", decodedToken) - } - if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { - node = mv.Addr().Interface() - continue - } - node = mv.Interface() - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { - node = elem.Addr().Interface() - continue - } - node = elem.Interface() - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - - } - - return nil -} - -// DecodedTokens returns the decoded tokens -func (p *Pointer) DecodedTokens() []string { - result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) - } - return result -} - -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document -func (p *Pointer) IsEmpty() bool { - return len(p.referenceTokens) == 0 -} - -// Pointer to string representation function -func (p *Pointer) String() string { - - if len(p.referenceTokens) == 0 { - return emptyPointer - } - - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... 
and vice versa - -const ( - encRefTok0 = `~0` - encRefTok1 = `~1` - decRefTok0 = `~` - decRefTok1 = `/` -) - -// Unescape unescapes a json pointer reference token string to the original representation -func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) - return step2 -} - -// Escape escapes a pointer reference token string -func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) - return step2 -} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore deleted file mode 100644 index 769c2440..00000000 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 40b90757..00000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/jsonreference/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md deleted file mode 100644 index 66345f4c..00000000 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) -An implementation of JSON Reference - Go language - -## Status -Work in progress ( 90% done ) - -## Dependencies -https://github.com/go-openapi/jsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod deleted file mode 100644 index 35adddfe..00000000 --- a/vendor/github.com/go-openapi/jsonreference/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/go-openapi/jsonreference - -require ( - github.com/PuerkitoBio/purell v1.1.1 - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/go-openapi/jsonpointer v0.19.2 - github.com/stretchr/testify v1.3.0 - golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect - golang.org/x/text v0.3.2 // indirect -) diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum deleted file mode 100644 index f1a7a34e..00000000 --- a/vendor/github.com/go-openapi/jsonreference/go.sum +++ /dev/null @@ -1,36 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson 
v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go deleted file mode 100644 index 3bc0a6e2..00000000 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 - -package jsonreference - -import ( - "errors" - "net/url" - "strings" - - "github.com/PuerkitoBio/purell" - "github.com/go-openapi/jsonpointer" -) - -const ( - fragmentRune = `#` -) - -// New creates a new reference for the given string -func New(jsonReferenceString string) (Ref, error) { - - var r Ref - err := r.parse(jsonReferenceString) - return r, err - -} - -// MustCreateRef parses the ref string and panics when it's invalid. 
-// Use the New method for a version that returns an error -func MustCreateRef(ref string) Ref { - r, err := New(ref) - if err != nil { - panic(err) - } - return r -} - -// Ref represents a json reference object -type Ref struct { - referenceURL *url.URL - referencePointer jsonpointer.Pointer - - HasFullURL bool - HasURLPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -// GetURL gets the URL for this reference -func (r *Ref) GetURL() *url.URL { - return r.referenceURL -} - -// GetPointer gets the json pointer for this reference -func (r *Ref) GetPointer() *jsonpointer.Pointer { - return &r.referencePointer -} - -// String returns the best version of the url for this reference -func (r *Ref) String() string { - - if r.referenceURL != nil { - return r.referenceURL.String() - } - - if r.HasFragmentOnly { - return fragmentRune + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -// IsRoot returns true if this reference is a root document -func (r *Ref) IsRoot() bool { - return r.referenceURL != nil && - !r.IsCanonical() && - !r.HasURLPathOnly && - r.referenceURL.Fragment == "" -} - -// IsCanonical returns true when this pointer starts with http(s):// or file:// -func (r *Ref) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) -} - -// "Constructor", parses the given string JSON reference -func (r *Ref) parse(jsonReferenceString string) error { - - parsed, err := url.Parse(jsonReferenceString) - if err != nil { - return err - } - - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) - refURL := r.referenceURL - - if refURL.Scheme != "" && refURL.Host != "" { - r.HasFullURL = true - } else { - if refURL.Path != "" { - r.HasURLPathOnly = true - } else if refURL.RawQuery == "" && refURL.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refURL.Scheme == "file" - r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") - - // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error - r.referencePointer, _ = jsonpointer.New(refURL.Fragment) - - return nil -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - childURL := child.GetURL() - parentURL := r.GetURL() - if childURL == nil { - return nil, errors.New("child url is nil") - } - if parentURL == nil { - return &child, nil - } - - ref, err := New(parentURL.ResolveReference(childURL).String()) - if err != nil { - return nil, err - } - return &ref, nil -} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/loads/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore deleted file mode 100644 index e4f15f17..00000000 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -secrets.yml -coverage.out -profile.cov -profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml deleted file mode 100644 index 1932914e..00000000 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ /dev/null @@ -1,22 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml deleted file mode 100644 index 8a7e05d9..00000000 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/loads/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index 071cf69a..00000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/loads.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go deleted file mode 100644 index 3046da4c..00000000 --- a/vendor/github.com/go-openapi/loads/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package loads provides document loading methods for swagger (OAI) specifications. - -It is used by other go-openapi packages to load and run analysis on local or remote spec documents. 
- -*/ -package loads diff --git a/vendor/github.com/go-openapi/loads/go.mod b/vendor/github.com/go-openapi/loads/go.mod deleted file mode 100644 index 29351d58..00000000 --- a/vendor/github.com/go-openapi/loads/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/go-openapi/loads - -require ( - github.com/go-openapi/analysis v0.19.5 - github.com/go-openapi/spec v0.19.3 - github.com/go-openapi/swag v0.19.5 - github.com/stretchr/testify v1.3.0 - gopkg.in/yaml.v2 v2.2.2 -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/loads/go.sum b/vendor/github.com/go-openapi/loads/go.sum deleted file mode 100644 index ac9e61b2..00000000 --- a/vendor/github.com/go-openapi/loads/go.sum +++ /dev/null @@ -1,96 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.3 
h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index e4b4a3cf..00000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package loads - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "net/url" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -var ( - loaders *loader - defaultLoader *loader -) - -func init() { - defaultLoader = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} - loaders = defaultLoader - spec.PathLoader = loaders.Fn - AddLoader(swag.YAMLMatcher, swag.YAMLDoc) - - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) - //gob.Register(spec.Refable{}) -} - -// AddLoader for a document -func AddLoader(predicate DocMatcher, load DocLoader) { - prev := loaders - loaders = &loader{ - Match: predicate, - Fn: load, - Next: prev, - } - spec.PathLoader = loaders.Fn -} - -type loader struct { - Fn DocLoader - Match DocMatcher - Next *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(data, "") -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - specFilePath string - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage -} - -// Embedded returns a Document based on embedded specs. 
No analysis is required -func Embedded(orig, flat json.RawMessage) (*Document, error) { - var origSpec, flatSpec spec.Swagger - if err := json.Unmarshal(orig, &origSpec); err != nil { - return nil, err - } - if err := json.Unmarshal(flat, &flatSpec); err != nil { - return nil, err - } - return &Document{ - raw: orig, - origSpec: &origSpec, - spec: &flatSpec, - }, nil -} - -// Spec loads a new spec document -func Spec(path string) (*Document, error) { - specURL, err := url.Parse(path) - if err != nil { - return nil, err - } - var lastErr error - for l := loaders.Next; l != nil; l = l.Next { - if loaders.Match(specURL.Path) { - b, err2 := loaders.Fn(path) - if err2 != nil { - lastErr = err2 - continue - } - doc, err3 := Analyzed(b, "") - if err3 != nil { - return nil, err3 - } - if doc != nil { - doc.specFilePath = path - } - return doc, nil - } - } - if lastErr != nil { - return nil, lastErr - } - b, err := defaultLoader.Fn(path) - if err != nil { - return nil, err - } - - document, err := Analyzed(b, "") - if document != nil { - document.specFilePath = path - } - - return document, err -} - -// Analyzed creates a new analyzed spec document -func Analyzed(data json.RawMessage, version string) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - raw := data - trimmed := bytes.TrimSpace(data) - if len(trimmed) > 0 { - if trimmed[0] != '{' && trimmed[0] != '[' { - yml, err := swag.BytesToYAMLDoc(trimmed) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - d, err := swag.YAMLToJSON(yml) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - raw = d - } - } - - swspec := new(spec.Swagger) - if err := json.Unmarshal(raw, swspec); err != nil { - return nil, err - } - - origsqspec, err := cloneSpec(swspec) - if err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: spec.MustLoadSwagger20Schema(), - spec: swspec, - raw: raw, - origSpec: origsqspec, - } - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - - var expandOptions *spec.ExpandOptions - if len(options) > 0 { - expandOptions = options[0] - } else { - expandOptions = &spec.ExpandOptions{ - RelativeBase: d.specFilePath, - } - } - - if err := spec.ExpandSpec(swspec, expandOptions); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - specFilePath: d.specFilePath, - schema: spec.MustLoadSwagger20Schema(), - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -// OrigSpec yields the original spec -func (d 
*Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - return dd -} - -// SpecFilePath returns the file path of the spec if one is defined -func (d *Document) SpecFilePath() string { - return d.specFilePath -} - -func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { - var b bytes.Buffer - if err := gob.NewEncoder(&b).Encode(src); err != nil { - return nil, err - } - - var dst spec.Swagger - if err := gob.NewDecoder(&b).Decode(&dst); err != nil { - return nil, err - } - return &dst, nil -} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/runtime/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore deleted file mode 100644 index fea8b84e..00000000 --- a/vendor/github.com/go-openapi/runtime/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -*.cov -*.out -playground diff --git a/vendor/github.com/go-openapi/runtime/.travis.yml b/vendor/github.com/go-openapi/runtime/.travis.yml deleted file mode 100644 index 2fc7b58f..00000000 --- a/vendor/github.com/go-openapi/runtime/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: EmObnQuM9Mw8J9vpFaKKHqSMN4Wsr/A9+v7ewAD5cEhA0T1P4m7MbJMiJOhxUhj/X+BFh2DamW+P2lT8mybj5wg8wnkQ2BteKA8Tawi6f9PRw2NRheO8tAi8o/npLnlmet0kc93mn+oLuqHw36w4+j5mkOl2FghkfGiUVhwrhkCP7KXQN+3TU87e+/HzQumlJ3nsE+6terVxkH3PmaUTsS5ONaODZfuxFpfb7RsoEl3skHf6d+tr+1nViLxxly7558Nc33C+W1mr0qiEvMLZ+kJ/CpGWBJ6CUJM3jm6hNe2eMuIPwEK2hxZob8c7n22VPap4K6a0bBRoydoDXaba+2sD7Ym6ivDO/DVyL44VeBBLyIiIBylDGQdZH+6SoWm90Qe/i7tnY/T5Ao5igT8f3cfQY1c3EsTfqmlDfrhmACBmwSlgkdVBLTprHL63JMY24LWmh4jhxsmMRZhCL4dze8su1w6pLN/pD1pGHtKYCEVbdTmaM3PblNRFf12XB7qosmQsgUndH4Vq3bTbU0s1pKjeDhRyLvFzvR0TBbo0pDLEoF1A/i5GVFWa7yLZNUDudQERRh7qv/xBl2excIaQ1sV4DSVm7bAE9l6Kp+yeHQJW2uN6Y3X8wu9gB9nv9l5HBze7wh8KE6PyWAOLYYqZg9/sAtsv/2GcQqXcKFF1zcA= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/runtime/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md deleted file mode 100644 index 5b1ec649..00000000 --- a/vendor/github.com/go-openapi/runtime/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) - -# golang Open-API toolkit - runtime - -The runtime component for use in codegeneration or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go deleted file mode 100644 index 4459025b..00000000 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -func defaultCloser() error { return nil } - -type byteStreamOpt func(opts *byteStreamOpts) - -// ClosesStream when the bytestream consumer or producer is finished -func ClosesStream(opts *byteStreamOpts) { - opts.Close = true -} - -type byteStreamOpts struct { - Close bool -} - -// ByteStreamConsumer creates a consmer for byte streams, -// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, -// and reads from the provided reader -func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("ByteStreamConsumer requires a reader") // early exit - } - - close := defaultCloser - if vals.Close { - if cl, ok := reader.(io.Closer); ok { - close = cl.Close - } - } - defer close() - - if wrtr, ok := data.(io.Writer); ok { - _, err := io.Copy(wrtr, reader) - return err - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - if bu, ok := data.(encoding.BinaryUnmarshaler); ok { - return bu.UnmarshalBinary(b) - } - - if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - v.SetBytes(b) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", - data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") - }) -} - -// ByteStreamProducer creates a producer for byte streams, -// takes a Reader/BinaryMarshaler interface or binary slice, -// and writes to a writer (essentially a pipe) -func ByteStreamProducer(opts ...byteStreamOpt) Producer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("ByteStreamProducer requires a writer") // early exit - } - close := defaultCloser - if vals.Close { - if cl, ok := writer.(io.Closer); ok { - close = cl.Close - } - } - defer close() - - if rc, ok := data.(io.ReadCloser); ok { - defer rc.Close() - } - - if rdr, ok := data.(io.Reader); ok { - _, err := io.Copy(writer, rdr) - return err - } - - if bm, ok := data.(encoding.BinaryMarshaler); ok { - bytes, err := bm.MarshalBinary() - if err != nil { - return err - } - - _, err = writer.Write(bytes) - return err - } - - if data != nil { - if e, ok := data.(error); ok { - _, err := writer.Write([]byte(e.Error())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - _, err := writer.Write(v.Bytes()) - return err - } - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", - data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") - }) -} diff --git a/vendor/github.com/go-openapi/runtime/client/auth_info.go b/vendor/github.com/go-openapi/runtime/client/auth_info.go deleted file mode 100644 index bbe1479c..00000000 --- 
a/vendor/github.com/go-openapi/runtime/client/auth_info.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "encoding/base64" - - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// PassThroughAuth never manipulates the request -var PassThroughAuth runtime.ClientAuthInfoWriter - -func init() { - PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil }) -} - -// BasicAuth provides a basic auth info writer -func BasicAuth(username, password string) runtime.ClientAuthInfoWriter { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - return r.SetHeaderParam("Authorization", "Basic "+encoded) - }) -} - -// APIKeyAuth provides an API key auth info writer -func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter { - if in == "query" { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - return r.SetQueryParam(name, value) - }) - } - - if in == "header" { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - return r.SetHeaderParam(name, value) - }) - } - return nil -} - -// BearerToken provides a header based oauth2 bearer access token auth info writer -func BearerToken(token string) runtime.ClientAuthInfoWriter { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - return r.SetHeaderParam("Authorization", "Bearer "+token) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go deleted file mode 100644 index f8325451..00000000 --- a/vendor/github.com/go-openapi/runtime/client/keepalive.go +++ /dev/null @@ -1,53 +0,0 @@ -package client - -import ( - "io" - "io/ioutil" - "net/http" - "sync/atomic" -) - -// KeepAliveTransport drains the remaining body from a response -// so that go will reuse the TCP connections. -// This is not enabled by default because there are servers where -// the response never gets closed and that would make the code hang forever. -// So instead it's provided as a http client middleware that can be used to override -// any request. 
-func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper { - return &keepAliveTransport{wrapped: rt} -} - -type keepAliveTransport struct { - wrapped http.RoundTripper -} - -func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) { - resp, err := k.wrapped.RoundTrip(r) - if err != nil { - return resp, err - } - resp.Body = &drainingReadCloser{rdr: resp.Body} - return resp, nil -} - -type drainingReadCloser struct { - rdr io.ReadCloser - seenEOF uint32 -} - -func (d *drainingReadCloser) Read(p []byte) (n int, err error) { - n, err = d.rdr.Read(p) - if err == io.EOF || n == 0 { - atomic.StoreUint32(&d.seenEOF, 1) - } - return -} - -func (d *drainingReadCloser) Close() error { - // drain buffer - if atomic.LoadUint32(&d.seenEOF) != 1 { - //#nosec - io.Copy(ioutil.Discard, d.rdr) - } - return d.rdr.Close() -} diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go deleted file mode 100644 index b7ea8b81..00000000 --- a/vendor/github.com/go-openapi/runtime/client/request.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io" - "log" - "mime/multipart" - "net/http" - "net/textproto" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// NewRequest creates a new swagger http client request -func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) { - return &request{ - pathPattern: pathPattern, - method: method, - writer: writer, - header: make(http.Header), - query: make(url.Values), - timeout: DefaultTimeout, - getBody: getRequestBuffer, - }, nil -} - -// Request represents a swagger client request. -// -// This Request struct converts to a HTTP request. -// There might be others that convert to other transports. -// There is no error checking here, it is assumed to be used after a spec has been validated. -// so impossible combinations should not arise (hopefully). -// -// The main purpose of this struct is to hide the machinery of adding params to a transport request. -// The generated code only implements what is necessary to turn a param into a valid value for these methods. 
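The request type documented above is the concrete ClientRequest implementation that generated code fills with parameters. A hedged sketch of the caller side, using the runtime.ClientRequestWriterFunc adapter that appears later in this diff; the parameter names are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/strfmt"
)

// writeParams fills path, query and header values on a runtime.ClientRequest;
// generated clients emit a writer like this for each operation.
var writeParams runtime.ClientRequestWriterFunc = func(req runtime.ClientRequest, _ strfmt.Registry) error {
	if err := req.SetPathParam("petId", "42"); err != nil { // hypothetical parameter
		return err
	}
	if err := req.SetQueryParam("verbose", "true"); err != nil {
		return err
	}
	return req.SetHeaderParam("X-Request-ID", "abc-123")
}

func main() {
	// The writer is normally handed to the transport via ClientOperation.Params.
	var _ runtime.ClientRequestWriter = writeParams
	fmt.Println("parameter writer ready")
}
```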
-type request struct { - pathPattern string - method string - writer runtime.ClientRequestWriter - - pathParams map[string]string - header http.Header - query url.Values - formFields url.Values - fileFields map[string][]runtime.NamedReadCloser - payload interface{} - timeout time.Duration - buf *bytes.Buffer - - getBody func(r *request) []byte -} - -var ( - // ensure interface compliance - _ runtime.ClientRequest = new(request) -) - -func (r *request) isMultipart(mediaType string) bool { - if len(r.fileFields) > 0 { - return true - } - - return runtime.MultipartFormMime == mediaType -} - -// BuildHTTP creates a new http request based on the data from the params -func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) { - return r.buildHTTP(mediaType, basePath, producers, registry, nil) -} -func escapeQuotes(s string) string { - return strings.NewReplacer("\\", "\\\\", `"`, "\\\"").Replace(s) -} -func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) { - // build the data - if err := r.writer.WriteToRequest(r, registry); err != nil { - return nil, err - } - - // Our body must be an io.Reader. - // When we create the http.Request, if we pass it a - // bytes.Buffer then it will wrap it in an io.ReadCloser - // and set the content length automatically. - var body io.Reader - var pr *io.PipeReader - var pw *io.PipeWriter - - r.buf = bytes.NewBuffer(nil) - if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 { - body = r.buf - if r.isMultipart(mediaType) { - pr, pw = io.Pipe() - body = pr - } - } - - // check if this is a form type request - if len(r.formFields) > 0 || len(r.fileFields) > 0 { - if !r.isMultipart(mediaType) { - r.header.Set(runtime.HeaderContentType, mediaType) - formString := r.formFields.Encode() - r.buf.WriteString(formString) - goto DoneChoosingBodySource - } - - mp := multipart.NewWriter(pw) - r.header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary())) - - go func() { - defer func() { - mp.Close() - pw.Close() - }() - - for fn, v := range r.formFields { - for _, vi := range v { - if err := mp.WriteField(fn, vi); err != nil { - pw.CloseWithError(err) - log.Println(err) - } - } - } - - defer func() { - for _, ff := range r.fileFields { - for _, ffi := range ff { - ffi.Close() - } - } - }() - for fn, f := range r.fileFields { - for _, fi := range f { - buf := bytes.NewBuffer([]byte{}) - - // Need to read the data so that we can detect the content type - _, err := io.Copy(buf, fi) - if err != nil { - _ = pw.CloseWithError(err) - log.Println(err) - } - fileBytes := buf.Bytes() - fileContentType := http.DetectContentType(fileBytes) - - newFi := runtime.NamedReader(fi.Name(), buf) - - // Create the MIME headers for the new part - h := make(textproto.MIMEHeader) - h.Set("Content-Disposition", - fmt.Sprintf(`form-data; name="%s"; filename="%s"`, - escapeQuotes(fn), escapeQuotes(filepath.Base(fi.Name())))) - h.Set("Content-Type", fileContentType) - - wrtr, err := mp.CreatePart(h) - if err != nil { - pw.CloseWithError(err) - log.Println(err) - } else if _, err := io.Copy(wrtr, newFi); err != nil { - pw.CloseWithError(err) - log.Println(err) - } - } - } - }() - - goto DoneChoosingBodySource - } - - // if there is payload, use the producer to write the payload, and then - // set the header to the content-type appropriate for the payload 
produced - if r.payload != nil { - // TODO: infer most appropriate content type based on the producer used, - // and the `consumers` section of the spec/operation - r.header.Set(runtime.HeaderContentType, mediaType) - if rdr, ok := r.payload.(io.ReadCloser); ok { - body = rdr - goto DoneChoosingBodySource - } - - if rdr, ok := r.payload.(io.Reader); ok { - body = rdr - goto DoneChoosingBodySource - } - - producer := producers[mediaType] - if err := producer.Produce(r.buf, r.payload); err != nil { - return nil, err - } - } - -DoneChoosingBodySource: - - if runtime.CanHaveBody(r.method) && body == nil && r.header.Get(runtime.HeaderContentType) == "" { - r.header.Set(runtime.HeaderContentType, mediaType) - } - - if auth != nil { - // If we're not using r.buf as our http.Request's body, - // either the payload is an io.Reader or io.ReadCloser, - // or we're doing a multipart form/file. - // - // In those cases, if the AuthenticateRequest call asks for the body, - // we must read it into a buffer and provide that, then use that buffer - // as the body of our http.Request. - // - // This is done in-line with the GetBody() request rather than ahead - // of time, because there's no way to know if the AuthenticateRequest - // will even ask for the body of the request. - // - // If for some reason the copy fails, there's no way to return that - // error to the GetBody() call, so return it afterwards. - // - // An error from the copy action is prioritized over any error - // from the AuthenticateRequest call, because the mis-read - // body may have interfered with the auth. - // - var copyErr error - if buf, ok := body.(*bytes.Buffer); body != nil && (!ok || buf != r.buf) { - var copied bool - r.getBody = func(r *request) []byte { - if copied { - return getRequestBuffer(r) - } - - defer func() { - copied = true - }() - - if _, copyErr = io.Copy(r.buf, body); copyErr != nil { - return nil - } - - if closer, ok := body.(io.ReadCloser); ok { - if copyErr = closer.Close(); copyErr != nil { - return nil - } - } - - body = r.buf - return getRequestBuffer(r) - } - } - - authErr := auth.AuthenticateRequest(r, registry) - - if copyErr != nil { - return nil, fmt.Errorf("error retrieving the response body: %v", copyErr) - } - - if authErr != nil { - return nil, authErr - } - } - - // create http request - var reinstateSlash bool - if r.pathPattern != "" && r.pathPattern != "/" && r.pathPattern[len(r.pathPattern)-1] == '/' { - reinstateSlash = true - } - urlPath := path.Join(basePath, r.pathPattern) - for k, v := range r.pathParams { - urlPath = strings.Replace(urlPath, "{"+k+"}", url.PathEscape(v), -1) - } - if reinstateSlash { - urlPath = urlPath + "/" - } - - req, err := http.NewRequest(r.method, urlPath, body) - if err != nil { - return nil, err - } - - req.URL.RawQuery = r.query.Encode() - req.Header = r.header - - return req, nil -} - -func mangleContentType(mediaType, boundary string) string { - if strings.ToLower(mediaType) == runtime.URLencodedFormMime { - return fmt.Sprintf("%s; boundary=%s", mediaType, boundary) - } - return "multipart/form-data; boundary=" + boundary -} - -func (r *request) GetMethod() string { - return r.method -} - -func (r *request) GetPath() string { - path := r.pathPattern - for k, v := range r.pathParams { - path = strings.Replace(path, "{"+k+"}", v, -1) - } - return path -} - -func (r *request) GetBody() []byte { - return r.getBody(r) -} - -func getRequestBuffer(r *request) []byte { - if r.buf == nil { - return nil - } - return r.buf.Bytes() -} - -// SetHeaderParam adds a 
header param to the request -// when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) -func (r *request) SetHeaderParam(name string, values ...string) error { - if r.header == nil { - r.header = make(http.Header) - } - r.header[http.CanonicalHeaderKey(name)] = values - return nil -} - -// GetHeaderParams returns the all headers currently set for the request -func (r *request) GetHeaderParams() http.Header { - return r.header -} - -// SetQueryParam adds a query param to the request -// when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) -func (r *request) SetQueryParam(name string, values ...string) error { - if r.query == nil { - r.query = make(url.Values) - } - r.query[name] = values - return nil -} - -// GetQueryParams returns a copy of all query params currently set for the request -func (r *request) GetQueryParams() url.Values { - var result = make(url.Values) - for key, value := range r.query { - result[key] = append([]string{}, value...) - } - return result -} - -// SetFormParam adds a forn param to the request -// when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) -func (r *request) SetFormParam(name string, values ...string) error { - if r.formFields == nil { - r.formFields = make(url.Values) - } - r.formFields[name] = values - return nil -} - -// SetPathParam adds a path param to the request -func (r *request) SetPathParam(name string, value string) error { - if r.pathParams == nil { - r.pathParams = make(map[string]string) - } - - r.pathParams[name] = value - return nil -} - -// SetFileParam adds a file param to the request -func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error { - for _, file := range files { - if actualFile, ok := file.(*os.File); ok { - fi, err := os.Stat(actualFile.Name()) - if err != nil { - return err - } - if fi.IsDir() { - return fmt.Errorf("%q is a directory, only files are supported", file.Name()) - } - } - } - - if r.fileFields == nil { - r.fileFields = make(map[string][]runtime.NamedReadCloser) - } - if r.formFields == nil { - r.formFields = make(url.Values) - } - - r.fileFields[name] = files - return nil -} - -func (r *request) GetFileParam() map[string][]runtime.NamedReadCloser { - return r.fileFields -} - -// SetBodyParam sets a body parameter on the request. -// This does not yet serialze the object, this happens as late as possible. -func (r *request) SetBodyParam(payload interface{}) error { - r.payload = payload - return nil -} - -func (r *request) GetBodyParam() interface{} { - return r.payload -} - -// SetTimeout sets the timeout for a request -func (r *request) SetTimeout(timeout time.Duration) error { - r.timeout = timeout - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/client/response.go b/vendor/github.com/go-openapi/runtime/client/response.go deleted file mode 100644 index bd238588..00000000 --- a/vendor/github.com/go-openapi/runtime/client/response.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "io" - "net/http" - - "github.com/go-openapi/runtime" -) - -var _ runtime.ClientResponse = response{} - -type response struct { - resp *http.Response -} - -func (r response) Code() int { - return r.resp.StatusCode -} - -func (r response) Message() string { - return r.resp.Status -} - -func (r response) GetHeader(name string) string { - return r.resp.Header.Get(name) -} - -func (r response) Body() io.ReadCloser { - return r.resp.Body -} diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go deleted file mode 100644 index 00ce53d6..00000000 --- a/vendor/github.com/go-openapi/runtime/client/runtime.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "mime" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" - - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/logger" - "github.com/go-openapi/runtime/middleware" -) - -// TLSClientOptions to configure client authentication with mutual TLS -type TLSClientOptions struct { - // Certificate is the path to a PEM-encoded certificate to be used for - // client authentication. If set then Key must also be set. - Certificate string - - // LoadedCertificate is the certificate to be used for client authentication. - // This field is ignored if Certificate is set. If this field is set, LoadedKey - // is also required. - LoadedCertificate *x509.Certificate - - // Key is the path to an unencrypted PEM-encoded private key for client - // authentication. This field is required if Certificate is set. - Key string - - // LoadedKey is the key for client authentication. This field is required if - // LoadedCertificate is set. - LoadedKey crypto.PrivateKey - - // CA is a path to a PEM-encoded certificate that specifies the root certificate - // to use when validating the TLS certificate presented by the server. If this field - // (and LoadedCA) is not set, the system certificate pool is used. This field is ignored if LoadedCA - // is set. - CA string - - // LoadedCA specifies the root certificate to use when validating the server's TLS certificate. - // If this field (and CA) is not set, the system certificate pool is used. - LoadedCA *x509.Certificate - - // LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate. 
- // If set, it will be combined with the the other loaded certificates (see LoadedCA and CA). - // If neither LoadedCA or CA is set, the provided pool with override the system - // certificate pool. - // The caller must not use the supplied pool after calling TLSClientAuth. - LoadedCAPool *x509.CertPool - - // ServerName specifies the hostname to use when verifying the server certificate. - // If this field is set then InsecureSkipVerify will be ignored and treated as - // false. - ServerName string - - // InsecureSkipVerify controls whether the certificate chain and hostname presented - // by the server are validated. If false, any certificate is accepted. - InsecureSkipVerify bool - - // VerifyPeerCertificate, if not nil, is called after normal - // certificate verification. It receives the raw ASN.1 certificates - // provided by the peer and also any verified chains that normal processing found. - // If it returns a non-nil error, the handshake is aborted and that error results. - // - // If normal verification fails then the handshake will abort before - // considering this callback. If normal verification is disabled by - // setting InsecureSkipVerify then this callback will be considered but - // the verifiedChains argument will always be nil. - VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error - - // SessionTicketsDisabled may be set to true to disable session ticket and - // PSK (resumption) support. Note that on clients, session ticket support is - // also disabled if ClientSessionCache is nil. - SessionTicketsDisabled bool - - // ClientSessionCache is a cache of ClientSessionState entries for TLS - // session resumption. It is only used by clients. - ClientSessionCache tls.ClientSessionCache - - // Prevents callers using unkeyed fields. 
- _ struct{} -} - -// TLSClientAuth creates a tls.Config for mutual auth -func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) { - // create client tls config - cfg := &tls.Config{} - - // load client cert if specified - if opts.Certificate != "" { - cert, err := tls.LoadX509KeyPair(opts.Certificate, opts.Key) - if err != nil { - return nil, fmt.Errorf("tls client cert: %v", err) - } - cfg.Certificates = []tls.Certificate{cert} - } else if opts.LoadedCertificate != nil { - block := pem.Block{Type: "CERTIFICATE", Bytes: opts.LoadedCertificate.Raw} - certPem := pem.EncodeToMemory(&block) - - var keyBytes []byte - switch k := opts.LoadedKey.(type) { - case *rsa.PrivateKey: - keyBytes = x509.MarshalPKCS1PrivateKey(k) - case *ecdsa.PrivateKey: - var err error - keyBytes, err = x509.MarshalECPrivateKey(k) - if err != nil { - return nil, fmt.Errorf("tls client priv key: %v", err) - } - default: - return nil, fmt.Errorf("tls client priv key: unsupported key type") - } - - block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes} - keyPem := pem.EncodeToMemory(&block) - - cert, err := tls.X509KeyPair(certPem, keyPem) - if err != nil { - return nil, fmt.Errorf("tls client cert: %v", err) - } - cfg.Certificates = []tls.Certificate{cert} - } - - cfg.InsecureSkipVerify = opts.InsecureSkipVerify - - cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate - cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled - cfg.ClientSessionCache = opts.ClientSessionCache - - // When no CA certificate is provided, default to the system cert pool - // that way when a request is made to a server known by the system trust store, - // the name is still verified - if opts.LoadedCA != nil { - caCertPool := basePool(opts.LoadedCAPool) - caCertPool.AddCert(opts.LoadedCA) - cfg.RootCAs = caCertPool - } else if opts.CA != "" { - // load ca cert - caCert, err := ioutil.ReadFile(opts.CA) - if err != nil { - return nil, fmt.Errorf("tls client ca: %v", err) - } - caCertPool := basePool(opts.LoadedCAPool) - caCertPool.AppendCertsFromPEM(caCert) - cfg.RootCAs = caCertPool - } else if opts.LoadedCAPool != nil { - cfg.RootCAs = opts.LoadedCAPool - } - - // apply servername overrride - if opts.ServerName != "" { - cfg.InsecureSkipVerify = false - cfg.ServerName = opts.ServerName - } - - cfg.BuildNameToCertificate() - - return cfg, nil -} - -func basePool(pool *x509.CertPool) *x509.CertPool { - if pool == nil { - return x509.NewCertPool() - } - return pool -} - -// TLSTransport creates a http client transport suitable for mutual tls auth -func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) { - cfg, err := TLSClientAuth(opts) - if err != nil { - return nil, err - } - - return &http.Transport{TLSClientConfig: cfg}, nil -} - -// TLSClient creates a http.Client for mutual auth -func TLSClient(opts TLSClientOptions) (*http.Client, error) { - transport, err := TLSTransport(opts) - if err != nil { - return nil, err - } - return &http.Client{Transport: transport}, nil -} - -// DefaultTimeout the default request timeout -var DefaultTimeout = 30 * time.Second - -// Runtime represents an API client that uses the transport -// to make http requests based on a swagger specification. 
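A hedged sketch of wiring the TLS options above into an *http.Client via TLSClient; the certificate paths and host are placeholders, and the go-openapi/runtime/client import path is assumed:

```go
package main

import (
	"log"

	httptransport "github.com/go-openapi/runtime/client" // assumed import path
)

func main() {
	// Build a mutually-authenticated HTTP client from file-based credentials.
	// All file paths here are placeholders.
	httpClient, err := httptransport.TLSClient(httptransport.TLSClientOptions{
		Certificate: "certs/client.crt",
		Key:         "certs/client.key",
		CA:          "certs/ca.crt",
	})
	if err != nil {
		log.Fatalf("tls client: %v", err)
	}

	// The resulting client can then back a transport, e.g. via NewWithClient.
	rt := httptransport.NewWithClient("api.example.com", "/v1", []string{"https"}, httpClient)
	_ = rt
}
```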
-type Runtime struct { - DefaultMediaType string - DefaultAuthentication runtime.ClientAuthInfoWriter - Consumers map[string]runtime.Consumer - Producers map[string]runtime.Producer - - Transport http.RoundTripper - Jar http.CookieJar - //Spec *spec.Document - Host string - BasePath string - Formats strfmt.Registry - Context context.Context - - Debug bool - logger logger.Logger - - clientOnce *sync.Once - client *http.Client - schemes []string -} - -// New creates a new default runtime for a swagger api runtime.Client -func New(host, basePath string, schemes []string) *Runtime { - var rt Runtime - rt.DefaultMediaType = runtime.JSONMime - - // TODO: actually infer this stuff from the spec - rt.Consumers = map[string]runtime.Consumer{ - runtime.JSONMime: runtime.JSONConsumer(), - runtime.XMLMime: runtime.XMLConsumer(), - runtime.TextMime: runtime.TextConsumer(), - runtime.HTMLMime: runtime.TextConsumer(), - runtime.CSVMime: runtime.CSVConsumer(), - runtime.DefaultMime: runtime.ByteStreamConsumer(), - } - rt.Producers = map[string]runtime.Producer{ - runtime.JSONMime: runtime.JSONProducer(), - runtime.XMLMime: runtime.XMLProducer(), - runtime.TextMime: runtime.TextProducer(), - runtime.HTMLMime: runtime.TextProducer(), - runtime.CSVMime: runtime.CSVProducer(), - runtime.DefaultMime: runtime.ByteStreamProducer(), - } - rt.Transport = http.DefaultTransport - rt.Jar = nil - rt.Host = host - rt.BasePath = basePath - rt.Context = context.Background() - rt.clientOnce = new(sync.Once) - if !strings.HasPrefix(rt.BasePath, "/") { - rt.BasePath = "/" + rt.BasePath - } - - rt.Debug = logger.DebugEnabled() - rt.logger = logger.StandardLogger{} - - if len(schemes) > 0 { - rt.schemes = schemes - } - return &rt -} - -// NewWithClient allows you to create a new transport with a configured http.Client -func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime { - rt := New(host, basePath, schemes) - if client != nil { - rt.clientOnce.Do(func() { - rt.client = client - }) - } - return rt -} - -func (r *Runtime) pickScheme(schemes []string) string { - if v := r.selectScheme(r.schemes); v != "" { - return v - } - if v := r.selectScheme(schemes); v != "" { - return v - } - return "http" -} - -func (r *Runtime) selectScheme(schemes []string) string { - schLen := len(schemes) - if schLen == 0 { - return "" - } - - scheme := schemes[0] - // prefer https, but skip when not possible - if scheme != "https" && schLen > 1 { - for _, sch := range schemes { - if sch == "https" { - scheme = sch - break - } - } - } - return scheme -} -func transportOrDefault(left, right http.RoundTripper) http.RoundTripper { - if left == nil { - return right - } - return left -} - -// EnableConnectionReuse drains the remaining body from a response -// so that go will reuse the TCP connections. -// -// This is not enabled by default because there are servers where -// the response never gets closed and that would make the code hang forever. -// So instead it's provided as a http client middleware that can be used to override -// any request. 
-func (r *Runtime) EnableConnectionReuse() { - if r.client == nil { - r.Transport = KeepAliveTransport( - transportOrDefault(r.Transport, http.DefaultTransport), - ) - return - } - - r.client.Transport = KeepAliveTransport( - transportOrDefault(r.client.Transport, - transportOrDefault(r.Transport, http.DefaultTransport), - ), - ) -} - -// Submit a request and when there is a body on success it will turn that into the result -// all other things are turned into an api error for swagger which retains the status code -func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error) { - params, readResponse, auth := operation.Params, operation.Reader, operation.AuthInfo - - request, err := newRequest(operation.Method, operation.PathPattern, params) - if err != nil { - return nil, err - } - - var accept []string - accept = append(accept, operation.ProducesMediaTypes...) - if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil { - return nil, err - } - - if auth == nil && r.DefaultAuthentication != nil { - auth = r.DefaultAuthentication - } - //if auth != nil { - // if err := auth.AuthenticateRequest(request, r.Formats); err != nil { - // return nil, err - // } - //} - - // TODO: pick appropriate media type - cmt := r.DefaultMediaType - for _, mediaType := range operation.ConsumesMediaTypes { - // Pick first non-empty media type - if mediaType != "" { - cmt = mediaType - break - } - } - - if _, ok := r.Producers[cmt]; !ok && cmt != runtime.MultipartFormMime && cmt != runtime.URLencodedFormMime { - return nil, fmt.Errorf("none of producers: %v registered. try %s", r.Producers, cmt) - } - - req, err := request.buildHTTP(cmt, r.BasePath, r.Producers, r.Formats, auth) - if err != nil { - return nil, err - } - req.URL.Scheme = r.pickScheme(operation.Schemes) - req.URL.Host = r.Host - req.Host = r.Host - - r.clientOnce.Do(func() { - r.client = &http.Client{ - Transport: r.Transport, - Jar: r.Jar, - } - }) - - if r.Debug { - b, err2 := httputil.DumpRequestOut(req, true) - if err2 != nil { - return nil, err2 - } - r.logger.Debugf("%s\n", string(b)) - } - - var hasTimeout bool - pctx := operation.Context - if pctx == nil { - pctx = r.Context - } else { - hasTimeout = true - } - if pctx == nil { - pctx = context.Background() - } - var ctx context.Context - var cancel context.CancelFunc - if hasTimeout { - ctx, cancel = context.WithCancel(pctx) - } else { - ctx, cancel = context.WithTimeout(pctx, request.timeout) - } - defer cancel() - - client := operation.Client - if client == nil { - client = r.client - } - req = req.WithContext(ctx) - res, err := client.Do(req) // make requests, by default follows 10 redirects before failing - if err != nil { - return nil, err - } - defer res.Body.Close() - - if r.Debug { - b, err2 := httputil.DumpResponse(res, true) - if err2 != nil { - return nil, err2 - } - r.logger.Debugf("%s\n", string(b)) - } - - ct := res.Header.Get(runtime.HeaderContentType) - if ct == "" { // this should really really never occur - ct = r.DefaultMediaType - } - - mt, _, err := mime.ParseMediaType(ct) - if err != nil { - return nil, fmt.Errorf("parse content type: %s", err) - } - - cons, ok := r.Consumers[mt] - if !ok { - if cons, ok = r.Consumers["*/*"]; !ok { - // scream about not knowing what to do - return nil, fmt.Errorf("no consumer: %q", ct) - } - } - return readResponse.ReadResponse(response{res}, cons) -} - -// SetDebug changes the debug flag. -// It ensures that client and middlewares have the set debug level. 
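Pulling the pieces together, a hedged end-to-end sketch of New, default authentication, connection reuse and Submit as defined above; the host, token, operation fields and response handling are all illustrative, not taken from this repository:

```go
package main

import (
	"log"

	"github.com/go-openapi/runtime"
	httptransport "github.com/go-openapi/runtime/client" // assumed import paths
	"github.com/go-openapi/strfmt"
)

func main() {
	// Build a transport for a hypothetical API and authenticate every call.
	rt := httptransport.New("api.example.com", "/v1", []string{"https"})
	rt.DefaultAuthentication = httptransport.BearerToken("s3cr3t-token") // placeholder token
	rt.Formats = strfmt.Default
	rt.EnableConnectionReuse() // drain bodies so TCP connections are reused

	// Describe one operation; generated clients normally build this struct.
	result, err := rt.Submit(&runtime.ClientOperation{
		ID:                 "getWidget", // hypothetical operation
		Method:             "GET",
		PathPattern:        "/widgets/{id}",
		ProducesMediaTypes: []string{runtime.JSONMime},
		ConsumesMediaTypes: []string{runtime.JSONMime},
		Schemes:            []string{"https"},
		Params: runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, _ strfmt.Registry) error {
			return req.SetPathParam("id", "42")
		}),
		Reader: runtime.ClientResponseReaderFunc(func(resp runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
			if resp.Code() != 200 {
				return nil, runtime.NewAPIError("getWidget", nil, resp.Code())
			}
			var payload map[string]interface{}
			if err := consumer.Consume(resp.Body(), &payload); err != nil {
				return nil, err
			}
			return payload, nil
		}),
	})
	if err != nil {
		log.Fatalf("submit: %v", err)
	}
	log.Printf("result: %v", result)
}
```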
-func (r *Runtime) SetDebug(debug bool) { - r.Debug = debug - middleware.Debug = debug -} - -// SetLogger changes the logger stream. -// It ensures that client and middlewares use the same logger. -func (r *Runtime) SetLogger(logger logger.Logger) { - r.logger = logger - middleware.Logger = logger -} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go deleted file mode 100644 index c6c97d9a..00000000 --- a/vendor/github.com/go-openapi/runtime/client_auth_info.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "github.com/go-openapi/strfmt" - -// A ClientAuthInfoWriterFunc converts a function to a request writer interface -type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error - -// AuthenticateRequest adds authentication data to the request -func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// A ClientAuthInfoWriter implementor knows how to write authentication info to a request -type ClientAuthInfoWriter interface { - AuthenticateRequest(ClientRequest, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go deleted file mode 100644 index fa21eacf..00000000 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "context" - "net/http" -) - -// ClientOperation represents the context for a swagger operation to be submitted to the transport -type ClientOperation struct { - ID string - Method string - PathPattern string - ProducesMediaTypes []string - ConsumesMediaTypes []string - Schemes []string - AuthInfo ClientAuthInfoWriter - Params ClientRequestWriter - Reader ClientResponseReader - Context context.Context - Client *http.Client -} - -// A ClientTransport implementor knows how to submit Request objects to some destination -type ClientTransport interface { - //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) - Submit(*ClientOperation) (interface{}, error) -} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go deleted file mode 100644 index 6215e0a1..00000000 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/go-openapi/strfmt" -) - -// ClientRequestWriterFunc converts a function to a request writer interface -type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error - -// WriteToRequest adds data to the request -func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// ClientRequestWriter is an interface for things that know how to write to a request -type ClientRequestWriter interface { - WriteToRequest(ClientRequest, strfmt.Registry) error -} - -// ClientRequest is an interface for things that know how to -// add information to a swagger client request -type ClientRequest interface { - SetHeaderParam(string, ...string) error - - GetHeaderParams() http.Header - - SetQueryParam(string, ...string) error - - SetFormParam(string, ...string) error - - SetPathParam(string, string) error - - GetQueryParams() url.Values - - SetFileParam(string, ...NamedReadCloser) error - - SetBodyParam(interface{}) error - - SetTimeout(time.Duration) error - - GetMethod() string - - GetPath() string - - GetBody() []byte - - GetBodyParam() interface{} - - GetFileParam() map[string][]NamedReadCloser -} - -// NamedReadCloser represents a named ReadCloser interface -type NamedReadCloser interface { - io.ReadCloser - Name() string -} - -// NamedReader creates a NamedReadCloser for use as file upload -func NamedReader(name string, rdr io.Reader) NamedReadCloser { - rc, ok := rdr.(io.ReadCloser) - if !ok { - rc = ioutil.NopCloser(rdr) - } - return &namedReadCloser{ - name: name, - cr: rc, - } -} - -type namedReadCloser struct { - name string - cr io.ReadCloser -} - -func (n *namedReadCloser) Close() error { - return n.cr.Close() -} -func (n *namedReadCloser) Read(p []byte) (int, error) { - return n.cr.Read(p) -} -func (n *namedReadCloser) Name() string { - return n.name 
-} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go deleted file mode 100644 index 729e18b2..00000000 --- a/vendor/github.com/go-openapi/runtime/client_response.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "fmt" - "io" -) - -// A ClientResponse represents a client response -// This bridges between responses obtained from different transports -type ClientResponse interface { - Code() int - Message() string - GetHeader(string) string - Body() io.ReadCloser -} - -// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation -type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) - -// ReadResponse reads the response -func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { - return read(resp, consumer) -} - -// A ClientResponseReader is an interface for things want to read a response. -// An application of this is to create structs from response values -type ClientResponseReader interface { - ReadResponse(ClientResponse, Consumer) (interface{}, error) -} - -// NewAPIError creates a new API error -func NewAPIError(opName string, payload interface{}, code int) *APIError { - return &APIError{ - OperationName: opName, - Response: payload, - Code: code, - } -} - -// APIError wraps an error model and captures the status code -type APIError struct { - OperationName string - Response interface{} - Code int -} - -func (a *APIError) Error() string { - return fmt.Sprintf("%s (status %d): %+v ", a.OperationName, a.Code, a.Response) -} diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go deleted file mode 100644 index a4de897a..00000000 --- a/vendor/github.com/go-openapi/runtime/constants.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -const ( - // HeaderContentType represents a http content-type header, it's value is supposed to be a mime type - HeaderContentType = "Content-Type" - - // HeaderTransferEncoding represents a http transfer-encoding header. 
- HeaderTransferEncoding = "Transfer-Encoding" - - // HeaderAccept the Accept header - HeaderAccept = "Accept" - - charsetKey = "charset" - - // DefaultMime the default fallback mime type - DefaultMime = "application/octet-stream" - // JSONMime the json mime type - JSONMime = "application/json" - // YAMLMime the yaml mime type - YAMLMime = "application/x-yaml" - // XMLMime the xml mime type - XMLMime = "application/xml" - // TextMime the text mime type - TextMime = "text/plain" - // HTMLMime the html mime type - HTMLMime = "text/html" - // CSVMime the csv mime type - CSVMime = "text/csv" - // MultipartFormMime the multipart form mime type - MultipartFormMime = "multipart/form-data" - // URLencodedFormMime the url encoded form mime type - URLencodedFormMime = "application/x-www-form-urlencoded" -) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go deleted file mode 100644 index d807bd91..00000000 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding/csv" - "errors" - "io" -) - -// CSVConsumer creates a new CSV consumer -func CSVConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("CSVConsumer requires a reader") - } - - csvReader := csv.NewReader(reader) - writer, ok := data.(io.Writer) - if !ok { - return errors.New("data type must be io.Writer") - } - csvWriter := csv.NewWriter(writer) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} - -// CSVProducer creates a new CSV producer -func CSVProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("CSVProducer requires a writer") - } - - dataBytes, ok := data.([]byte) - if !ok { - return errors.New("data type must be byte array") - } - - csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - csvWriter := csv.NewWriter(writer) - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go deleted file mode 100644 index 0d390cfd..00000000 --- a/vendor/github.com/go-openapi/runtime/discard.go +++ /dev/null @@ -1,9 +0,0 @@ -package runtime - -import "io" - -// DiscardConsumer does absolutely nothing, it's a black hole. -var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) - -// DiscardProducer does absolutely nothing, it's a black hole. 
-var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go deleted file mode 100644 index 85971c18..00000000 --- a/vendor/github.com/go-openapi/runtime/file.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "mime/multipart" - -// File represents an uploaded file. -type File struct { - Data multipart.File - Header *multipart.FileHeader -} - -// Read bytes from the file -func (f *File) Read(p []byte) (n int, err error) { - return f.Data.Read(p) -} - -// Close the file -func (f *File) Close() error { - return f.Data.Close() -} diff --git a/vendor/github.com/go-openapi/runtime/go.mod b/vendor/github.com/go-openapi/runtime/go.mod deleted file mode 100644 index f5f9d16b..00000000 --- a/vendor/github.com/go-openapi/runtime/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module github.com/go-openapi/runtime - -require ( - github.com/docker/go-units v0.4.0 - github.com/go-openapi/analysis v0.19.5 - github.com/go-openapi/errors v0.19.2 - github.com/go-openapi/loads v0.19.3 - github.com/go-openapi/spec v0.19.3 - github.com/go-openapi/strfmt v0.19.3 - github.com/go-openapi/swag v0.19.5 - github.com/go-openapi/validate v0.19.3 - github.com/stretchr/testify v1.4.0 - gopkg.in/yaml.v2 v2.2.4 -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/runtime/go.sum b/vendor/github.com/go-openapi/runtime/go.sum deleted file mode 100644 index c24bb986..00000000 --- a/vendor/github.com/go-openapi/runtime/go.sum +++ /dev/null @@ -1,149 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
-github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3 h1:jwIoahqCmaA5OBoc/B+1+Mu2L0Gr8xYQnbeyQEo/7b0= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= 
-github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3 h1:PAH/2DylwWcIU1s0Y7k3yNmeAgWOcKrNE2Q7Ww/kCg4= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= 
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe h1:W/GaMY0y69G4cFlmsC6B9sbuo2fP8OFP1ABjt4kPz+w= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53 h1:kcXqo9vE6fsZY5X5Rd7R1l7fTgnWaDCVmln65REefiE= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 
h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go deleted file mode 100644 index 4d111db4..00000000 --- a/vendor/github.com/go-openapi/runtime/headers.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "mime" - "net/http" - - "github.com/go-openapi/errors" -) - -// ContentType parses a content type header -func ContentType(headers http.Header) (string, string, error) { - ct := headers.Get(HeaderContentType) - orig := ct - if ct == "" { - ct = DefaultMime - } - if ct == "" { - return "", "", nil - } - - mt, opts, err := mime.ParseMediaType(ct) - if err != nil { - return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) - } - - if cs, ok := opts[charsetKey]; ok { - return mt, cs, nil - } - - return mt, "", nil -} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go deleted file mode 100644 index 65de0aa4..00000000 --- a/vendor/github.com/go-openapi/runtime/interfaces.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "io" - "net/http" - - "github.com/go-openapi/strfmt" -) - -// OperationHandlerFunc an adapter for a function to the OperationHandler interface -type OperationHandlerFunc func(interface{}) (interface{}, error) - -// Handle implements the operation handler interface -func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { - return s(data) -} - -// OperationHandler a handler for a swagger operation -type OperationHandler interface { - Handle(interface{}) (interface{}, error) -} - -// ConsumerFunc represents a function that can be used as a consumer -type ConsumerFunc func(io.Reader, interface{}) error - -// Consume consumes the reader into the data parameter -func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { - return fn(reader, data) -} - -// Consumer implementations know how to bind the values on the provided interface to -// data provided by the request body -type Consumer interface { - // Consume performs the binding of request values - Consume(io.Reader, interface{}) error -} - -// ProducerFunc represents a function that can be used as a producer -type ProducerFunc func(io.Writer, interface{}) error - -// Produce produces the response for the provided data -func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { - return f(writer, data) -} - -// Producer implementations know how to turn the provided interface into a valid -// HTTP response -type Producer interface { - // Produce writes to the http response - Produce(io.Writer, interface{}) error -} - -// AuthenticatorFunc turns a function into an authenticator -type AuthenticatorFunc func(interface{}) (bool, interface{}, error) - -// Authenticate authenticates the request with the provided data -func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { - return f(params) -} - -// Authenticator represents an authentication strategy -// implementations of Authenticator know how to authenticate the -// request data and translate that into a valid principal object or an error -type Authenticator interface { - 
Authenticate(interface{}) (bool, interface{}, error) -} - -// AuthorizerFunc turns a function into an authorizer -type AuthorizerFunc func(*http.Request, interface{}) error - -// Authorize authorizes the processing of the request for the principal -func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { - return f(r, principal) -} - -// Authorizer represents an authorization strategy -// implementations of Authorizer know how to authorize the principal object -// using the request data and returns error if unauthorized -type Authorizer interface { - Authorize(*http.Request, interface{}) error -} - -// Validatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. -// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the validations obtained from the spec -type Validatable interface { - Validate(strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go deleted file mode 100644 index 5a690559..00000000 --- a/vendor/github.com/go-openapi/runtime/json.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONConsumer creates a new JSON consumer -func JSONConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := json.NewDecoder(reader) - dec.UseNumber() // preserve number formats - return dec.Decode(data) - }) -} - -// JSONProducer creates a new JSON producer -func JSONProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := json.NewEncoder(writer) - enc.SetEscapeHTML(false) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/logger/logger.go b/vendor/github.com/go-openapi/runtime/logger/logger.go deleted file mode 100644 index 6f4debcc..00000000 --- a/vendor/github.com/go-openapi/runtime/logger/logger.go +++ /dev/null @@ -1,20 +0,0 @@ -package logger - -import "os" - -type Logger interface { - Printf(format string, args ...interface{}) - Debugf(format string, args ...interface{}) -} - -func DebugEnabled() bool { - d := os.Getenv("SWAGGER_DEBUG") - if d != "" && d != "false" && d != "0" { - return true - } - d = os.Getenv("DEBUG") - if d != "" && d != "false" && d != "0" { - return true - } - return false -} diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go deleted file mode 100644 index f7e67ebb..00000000 --- a/vendor/github.com/go-openapi/runtime/logger/standard.go +++ /dev/null @@ -1,22 +0,0 @@ -package logger - -import ( - "fmt" - "os" -) - -type StandardLogger struct{} - -func (StandardLogger) Printf(format string, args ...interface{}) { - if len(format) == 0 || format[len(format)-1] != '\n' { - format += "\n" - } - fmt.Fprintf(os.Stderr, format, args...) -} - -func (StandardLogger) Debugf(format string, args ...interface{}) { - if len(format) == 0 || format[len(format)-1] != '\n' { - format += "\n" - } - fmt.Fprintf(os.Stderr, format, args...) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go deleted file mode 100644 index 0ff9e39a..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/context.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - stdContext "context" - "fmt" - "net/http" - "strings" - "sync" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/logger" - "github.com/go-openapi/runtime/middleware/untyped" - "github.com/go-openapi/runtime/security" -) - -// Debug when true turns on verbose logging -var Debug = logger.DebugEnabled() -var Logger logger.Logger = logger.StandardLogger{} - -func debugLog(format string, args ...interface{}) { - if Debug { - Logger.Printf(format, args...) 
- } -} - -// A Builder can create middlewares -type Builder func(http.Handler) http.Handler - -// PassthroughBuilder returns the handler, aka the builder identity function -func PassthroughBuilder(handler http.Handler) http.Handler { return handler } - -// RequestBinder is an interface for types to implement -// when they want to be able to bind from a request -type RequestBinder interface { - BindRequest(*http.Request, *MatchedRoute) error -} - -// Responder is an interface for types to implement -// when they want to be considered for writing HTTP responses -type Responder interface { - WriteResponse(http.ResponseWriter, runtime.Producer) -} - -// ResponderFunc wraps a func as a Responder interface -type ResponderFunc func(http.ResponseWriter, runtime.Producer) - -// WriteResponse writes to the response -func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { - fn(rw, pr) -} - -// Context is a type safe wrapper around an untyped request context -// used throughout to store request context with the standard context attached -// to the http.Request -type Context struct { - spec *loads.Document - analyzer *analysis.Spec - api RoutableAPI - router Router -} - -type routableUntypedAPI struct { - api *untyped.API - hlock *sync.Mutex - handlers map[string]map[string]http.Handler - defaultConsumes string - defaultProduces string -} - -func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI { - var handlers map[string]map[string]http.Handler - if spec == nil || api == nil { - return nil - } - analyzer := analysis.New(spec.Spec()) - for method, hls := range analyzer.Operations() { - um := strings.ToUpper(method) - for path, op := range hls { - schemes := analyzer.SecurityRequirementsFor(op) - - if oh, ok := api.OperationHandlerFor(method, path); ok { - if handlers == nil { - handlers = make(map[string]map[string]http.Handler) - } - if b, ok := handlers[um]; !ok || b == nil { - handlers[um] = make(map[string]http.Handler) - } - - var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // lookup route info in the context - route, rCtx, _ := context.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - - // bind and validate the request using reflection - var bound interface{} - var validation error - bound, r, validation = context.BindAndValidate(r, route) - if validation != nil { - context.Respond(w, r, route.Produces, route, validation) - return - } - - // actually handle the request - result, err := oh.Handle(bound) - if err != nil { - // respond with failure - context.Respond(w, r, route.Produces, route, err) - return - } - - // respond with success - context.Respond(w, r, route.Produces, route, result) - }) - - if len(schemes) > 0 { - handler = newSecureAPI(context, handler) - } - handlers[um][path] = handler - } - } - } - - return &routableUntypedAPI{ - api: api, - hlock: new(sync.Mutex), - handlers: handlers, - defaultProduces: api.DefaultProduces, - defaultConsumes: api.DefaultConsumes, - } -} - -func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) { - r.hlock.Lock() - paths, ok := r.handlers[strings.ToUpper(method)] - if !ok { - r.hlock.Unlock() - return nil, false - } - handler, ok := paths[path] - r.hlock.Unlock() - return handler, ok -} -func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { - return r.api.ServeError -} -func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) 
map[string]runtime.Consumer { - return r.api.ConsumersFor(mediaTypes) -} -func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { - return r.api.ProducersFor(mediaTypes) -} -func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { - return r.api.AuthenticatorsFor(schemes) -} -func (r *routableUntypedAPI) Authorizer() runtime.Authorizer { - return r.api.Authorizer() -} -func (r *routableUntypedAPI) Formats() strfmt.Registry { - return r.api.Formats() -} - -func (r *routableUntypedAPI) DefaultProduces() string { - return r.defaultProduces -} - -func (r *routableUntypedAPI) DefaultConsumes() string { - return r.defaultConsumes -} - -// NewRoutableContext creates a new context for a routable API -func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { - var an *analysis.Spec - if spec != nil { - an = analysis.New(spec.Spec()) - } - ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes} - return ctx -} - -// NewContext creates a new context wrapper -func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { - var an *analysis.Spec - if spec != nil { - an = analysis.New(spec.Spec()) - } - ctx := &Context{spec: spec, analyzer: an} - ctx.api = newRoutableUntypedAPI(spec, api, ctx) - ctx.router = routes - return ctx -} - -// Serve serves the specified spec with the specified api registrations as a http.Handler -func Serve(spec *loads.Document, api *untyped.API) http.Handler { - return ServeWithBuilder(spec, api, PassthroughBuilder) -} - -// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated -// by the Builder -func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { - context := NewContext(spec, api, nil) - return context.APIHandler(builder) -} - -type contextKey int8 - -const ( - _ contextKey = iota - ctxContentType - ctxResponseFormat - ctxMatchedRoute - ctxBoundParams - ctxSecurityPrincipal - ctxSecurityScopes -) - -// MatchedRouteFrom request context value. -func MatchedRouteFrom(req *http.Request) *MatchedRoute { - mr := req.Context().Value(ctxMatchedRoute) - if mr == nil { - return nil - } - if res, ok := mr.(*MatchedRoute); ok { - return res - } - return nil -} - -// SecurityPrincipalFrom request context value. -func SecurityPrincipalFrom(req *http.Request) interface{} { - return req.Context().Value(ctxSecurityPrincipal) -} - -// SecurityScopesFrom request context value. 
-func SecurityScopesFrom(req *http.Request) []string { - rs := req.Context().Value(ctxSecurityScopes) - if res, ok := rs.([]string); ok { - return res - } - return nil -} - -type contentTypeValue struct { - MediaType string - Charset string -} - -// BasePath returns the base path for this API -func (c *Context) BasePath() string { - return c.spec.BasePath() -} - -// RequiredProduces returns the accepted content types for responses -func (c *Context) RequiredProduces() []string { - return c.analyzer.RequiredProduces() -} - -// BindValidRequest binds a params object to a request but only when the request is valid -// if the request is not valid an error will be returned -func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error { - var res []error - var requestContentType string - - // check and validate content type, select consumer - if runtime.HasBody(request) { - ct, _, err := runtime.ContentType(request.Header) - if err != nil { - res = append(res, err) - } else { - if err := validateContentType(route.Consumes, ct); err != nil { - res = append(res, err) - } - if len(res) == 0 { - cons, ok := route.Consumers[ct] - if !ok { - res = append(res, errors.New(500, "no consumer registered for %s", ct)) - } else { - route.Consumer = cons - requestContentType = ct - } - } - } - } - - // check and validate the response format - if len(res) == 0 { - if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" { - res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces)) - } - } - - // now bind the request with the provided binder - // it's assumed the binder will also validate the request and return an error if the - // request is invalid - if binder != nil && len(res) == 0 { - if err := binder.BindRequest(request, route); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// ContentType gets the parsed value of a content type -// Returns the media type, its charset and a shallow copy of the request -// when its context doesn't contain the content type value, otherwise it returns -// the same request -// Returns the error that runtime.ContentType may retunrs. 
-func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok { - return v.MediaType, v.Charset, request, nil - } - - mt, cs, err := runtime.ContentType(request.Header) - if err != nil { - return "", "", nil, err - } - rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs}) - return mt, cs, request.WithContext(rCtx), nil -} - -// LookupRoute looks a route up and returns true when it is found -func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { - if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { - return route, ok - } - return nil, false -} - -// RouteInfo tries to match a route for this request -// Returns the matched route, a shallow copy of the request if its context -// contains the matched router, otherwise the same request, and a bool to -// indicate if it the request matches one of the routes, if it doesn't -// then it returns false and nil for the other two return values -func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok { - return v, request, ok - } - - if route, ok := c.LookupRoute(request); ok { - rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route) - return route, request.WithContext(rCtx), ok - } - - return nil, nil, false -} - -// ResponseFormat negotiates the response content type -// Returns the response format and a shallow copy of the request if its context -// doesn't contain the response format, otherwise the same request -func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { - var rCtx = r.Context() - - if v, ok := rCtx.Value(ctxResponseFormat).(string); ok { - debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v) - return v, r - } - - format := NegotiateContentType(r, offers, "") - if format != "" { - debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format) - r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format)) - } - debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format) - return format, r -} - -// AllowedMethods gets the allowed methods for the path of this request -func (c *Context) AllowedMethods(request *http.Request) []string { - return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) -} - -// ResetAuth removes the current principal from the request context -func (c *Context) ResetAuth(request *http.Request) *http.Request { - rctx := request.Context() - rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil) - rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil) - return request.WithContext(rctx) -} - -// Authorize authorizes the request -// Returns the principal object and a shallow copy of the request when its -// context doesn't contain the principal, otherwise the same request or an error -// (the last) if one of the authenticators returns one or an Unauthenticated error -func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) { - if route == nil || !route.HasAuth() { - return nil, nil, nil - } - - var rCtx = request.Context() - if v := rCtx.Value(ctxSecurityPrincipal); v != nil { - return v, request, nil - } - - applies, usr, err := route.Authenticators.Authenticate(request, route) - if !applies || err != 
nil || !route.Authenticators.AllowsAnonymous() && usr == nil { - if err != nil { - return nil, nil, err - } - return nil, nil, errors.Unauthenticated("invalid credentials") - } - if route.Authorizer != nil { - if err := route.Authorizer.Authorize(request, usr); err != nil { - return nil, nil, errors.New(http.StatusForbidden, err.Error()) - } - } - - rCtx = request.Context() - - rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr) - rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes()) - return usr, request.WithContext(rCtx), nil -} - -// BindAndValidate binds and validates the request -// Returns the validation map and a shallow copy of the request when its context -// doesn't contain the validation, otherwise it returns the same request or an -// CompositeValidationError error -func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok { - debugLog("got cached validation (valid: %t)", len(v.result) == 0) - if len(v.result) > 0 { - return v.bound, request, errors.CompositeValidationError(v.result...) - } - return v.bound, request, nil - } - result := validateRequest(c, request, matched) - rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result) - request = request.WithContext(rCtx) - if len(result.result) > 0 { - return result.bound, request, errors.CompositeValidationError(result.result...) - } - debugLog("no validation errors found") - return result.bound, request, nil -} - -// NotFound the default not found responder for when no route has been matched yet -func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) { - c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found")) -} - -// Respond renders the response after doing some content negotiation -func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) { - debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces) - offers := []string{} - for _, mt := range produces { - if mt != c.api.DefaultProduces() { - offers = append(offers, mt) - } - } - // the default producer is last so more specific producers take precedence - offers = append(offers, c.api.DefaultProduces()) - debugLog("offers: %v", offers) - - var format string - format, r = c.ResponseFormat(r, offers) - rw.Header().Set(runtime.HeaderContentType, format) - - if resp, ok := data.(Responder); ok { - producers := route.Producers - prod, ok := producers[format] - if !ok { - prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) - pr, ok := prods[c.api.DefaultProduces()] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - prod = pr - } - resp.WriteResponse(rw, prod) - return - } - - if err, ok := data.(error); ok { - if format == "" { - rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime) - } - - if realm := security.FailedBasicAuth(r); realm != "" { - rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm)) - } - - if route == nil || route.Operation == nil { - c.api.ServeErrorFor("")(rw, r, err) - return - } - c.api.ServeErrorFor(route.Operation.ID)(rw, r, err) - return - } - - if route == nil || route.Operation == nil { - rw.WriteHeader(200) - if r.Method == "HEAD" { - return - } - producers := c.api.ProducersFor(normalizeOffers(offers)) - prod, ok := 
producers[format] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - if err := prod.Produce(rw, data); err != nil { - panic(err) // let the recovery middleware deal with this - } - return - } - - if _, code, ok := route.Operation.SuccessResponse(); ok { - rw.WriteHeader(code) - if code == 204 || r.Method == "HEAD" { - return - } - - producers := route.Producers - prod, ok := producers[format] - if !ok { - if !ok { - prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) - pr, ok := prods[c.api.DefaultProduces()] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - prod = pr - } - } - if err := prod.Produce(rw, data); err != nil { - panic(err) // let the recovery middleware deal with this - } - return - } - - c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response")) -} - -// APIHandler returns a handler to serve the API, this includes a swagger spec, router and the contract defined in the swagger spec -func (c *Context) APIHandler(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - - var title string - sp := c.spec.Spec() - if sp != nil && sp.Info != nil && sp.Info.Title != "" { - title = sp.Info.Title - } - - redocOpts := RedocOpts{ - BasePath: c.BasePath(), - Title: title, - } - - return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b))) -} - -// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec -func (c *Context) RoutesHandler(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - return NewRouter(c, b(NewOperationExecutor(c))) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE deleted file mode 100644 index e65039ad..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Naoya Inada - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md deleted file mode 100644 index 30109e17..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md +++ /dev/null @@ -1,180 +0,0 @@ -# Denco [![Build Status](https://travis-ci.org/naoina/denco.png?branch=master)](https://travis-ci.org/naoina/denco) - -The fast and flexible HTTP request router for [Go](http://golang.org). - -Denco is based on Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter). -However, Denco is optimized and some features added. - -## Features - -* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark)) -* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`) -* Small (but enough) URL router API -* HTTP request multiplexer like `http.ServeMux` - -## Installation - - go get -u github.com/go-openapi/runtime/middleware/denco - -## Using as HTTP request multiplexer - -```go -package main - -import ( - "fmt" - "log" - "net/http" - - "github.com/go-openapi/runtime/middleware/denco" -) - -func Index(w http.ResponseWriter, r *http.Request, params denco.Params) { - fmt.Fprintf(w, "Welcome to Denco!\n") -} - -func User(w http.ResponseWriter, r *http.Request, params denco.Params) { - fmt.Fprintf(w, "Hello %s!\n", params.Get("name")) -} - -func main() { - mux := denco.NewMux() - handler, err := mux.Build([]denco.Handler{ - mux.GET("/", Index), - mux.GET("/user/:name", User), - mux.POST("/user/:name", User), - }) - if err != nil { - panic(err) - } - log.Fatal(http.ListenAndServe(":8080", handler)) -} -``` - -## Using as URL router - -```go -package main - -import ( - "fmt" - - "github.com/go-openapi/runtime/middleware/denco" -) - -type route struct { - name string -} - -func main() { - router := denco.New() - router.Build([]denco.Record{ - {"/", &route{"root"}}, - {"/user/:id", &route{"user"}}, - {"/user/:name/:id", &route{"username"}}, - {"/static/*filepath", &route{"static"}}, - }) - - data, params, found := router.Lookup("/") - // print `&main.route{name:"root"}, denco.Params(nil), true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) - - data, params, found = router.Lookup("/user/hoge") - // print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) - - data, params, found = router.Lookup("/user/hoge/7") - // print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) - - data, params, found = router.Lookup("/static/path/to/file") - // print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) -} -``` - -See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details. - -## Getting the value of path parameter - -You can get the value of path parameter by 2 ways. - -1. Using [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method -2. Find by loop - -```go -package main - -import ( - "fmt" - - "github.com/go-openapi/runtime/middleware/denco" -) - -func main() { - router := denco.New() - if err := router.Build([]denco.Record{ - {"/user/:name/:id", "route1"}, - }); err != nil { - panic(err) - } - - // 1. Using denco.Params.Get method. 
- _, params, _ := router.Lookup("/user/alice/1") - name := params.Get("name") - if name != "" { - fmt.Printf("Hello %s.\n", name) // prints "Hello alice.". - } - - // 2. Find by loop. - for _, param := range params { - if param.Name == "name" { - fmt.Printf("Hello %s.\n", name) // prints "Hello alice.". - } - } -} -``` - -## URL patterns - -Denco's route matching strategy is "most nearly matching". - -When routes `/:name` and `/alice` have been built, URI `/alice` matches the route `/alice`, not `/:name`. -Because URI `/alice` is more match with the route `/alice` than `/:name`. - -For more example, when routes below have been built: - -``` -/user/alice -/user/:name -/user/:name/:id -/user/alice/:id -/user/:id/bob -``` - -Routes matching are: - -``` -/user/alice => "/user/alice" (no match with "/user/:name") -/user/bob => "/user/:name" -/user/naoina/1 => "/user/:name/1" -/user/alice/1 => "/user/alice/:id" (no match with "/user/:name/:id") -/user/1/bob => "/user/:id/bob" (no match with "/user/:name/:id") -/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob") -``` - -## Limitation - -Denco has some limitations below. - -* Number of param records (such as `/:name`) must be less than 2^22 -* Number of elements of internal slice must be less than 2^22 - -## Benchmarks - - cd $GOPATH/github.com/go-openapi/runtime/middleware/denco - go test -bench . -benchmem - -## License - -Denco is licensed under the MIT License. diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go deleted file mode 100644 index ecacc31f..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go +++ /dev/null @@ -1,456 +0,0 @@ -// Package denco provides fast URL router. -package denco - -import ( - "fmt" - "sort" - "strings" -) - -const ( - // ParamCharacter is a special character for path parameter. - ParamCharacter = ':' - - // WildcardCharacter is a special character for wildcard path parameter. - WildcardCharacter = '*' - - // TerminationCharacter is a special character for end of path. - TerminationCharacter = '#' - - // SeparatorCharacter separates path segments. - SeparatorCharacter = '/' - - // MaxSize is max size of records and internal slice. - MaxSize = (1 << 22) - 1 -) - -// Router represents a URL router. -type Router struct { - // SizeHint expects the maximum number of path parameters in records to Build. - // SizeHint will be used to determine the capacity of the memory to allocate. - // By default, SizeHint will be determined from given records to Build. - SizeHint int - - static map[string]interface{} - param *doubleArray -} - -// New returns a new Router. -func New() *Router { - return &Router{ - SizeHint: -1, - static: make(map[string]interface{}), - param: newDoubleArray(), - } -} - -// Lookup returns data and path parameters that associated with path. -// params is a slice of the Param that arranged in the order in which parameters appeared. -// e.g. when built routing path is "/path/to/:id/:name" and given path is "/path/to/1/alice". params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}]. 
-func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) { - if data, found := rt.static[path]; found { - return data, nil, true - } - if len(rt.param.node) == 1 { - return nil, nil, false - } - nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1) - if !found { - return nil, nil, false - } - for i := 0; i < len(params); i++ { - params[i].Name = nd.paramNames[i] - } - return nd.data, params, true -} - -// Build builds URL router from records. -func (rt *Router) Build(records []Record) error { - statics, params := makeRecords(records) - if len(params) > MaxSize { - return fmt.Errorf("denco: too many records") - } - if rt.SizeHint < 0 { - rt.SizeHint = 0 - for _, p := range params { - size := 0 - for _, k := range p.Key { - if k == ParamCharacter || k == WildcardCharacter { - size++ - } - } - if size > rt.SizeHint { - rt.SizeHint = size - } - } - } - for _, r := range statics { - rt.static[r.Key] = r.Value - } - if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil { - return err - } - return nil -} - -// Param represents name and value of path parameter. -type Param struct { - Name string - Value string -} - -// Params represents the name and value of path parameters. -type Params []Param - -// Get gets the first value associated with the given name. -// If there are no values associated with the key, Get returns "". -func (ps Params) Get(name string) string { - for _, p := range ps { - if p.Name == name { - return p.Value - } - } - return "" -} - -type doubleArray struct { - bc []baseCheck - node []*node -} - -func newDoubleArray() *doubleArray { - return &doubleArray{ - bc: []baseCheck{0}, - node: []*node{nil}, // A start index is adjusting to 1 because 0 will be used as a mark of non-existent node. - } -} - -// baseCheck contains BASE, CHECK and Extra flags. -// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK. 
-// -// BASE (22bit) | Extra flags (2bit) | CHECK (8bit) -// |----------------------|--|--------| -// 32 10 8 0 -type baseCheck uint32 - -func (bc baseCheck) Base() int { - return int(bc >> 10) -} - -func (bc *baseCheck) SetBase(base int) { - *bc |= baseCheck(base) << 10 -} - -func (bc baseCheck) Check() byte { - return byte(bc) -} - -func (bc *baseCheck) SetCheck(check byte) { - *bc |= baseCheck(check) -} - -func (bc baseCheck) IsEmpty() bool { - return bc&0xfffffcff == 0 -} - -func (bc baseCheck) IsSingleParam() bool { - return bc&paramTypeSingle == paramTypeSingle -} - -func (bc baseCheck) IsWildcardParam() bool { - return bc&paramTypeWildcard == paramTypeWildcard -} - -func (bc baseCheck) IsAnyParam() bool { - return bc&paramTypeAny != 0 -} - -func (bc *baseCheck) SetSingleParam() { - *bc |= (1 << 8) -} - -func (bc *baseCheck) SetWildcardParam() { - *bc |= (1 << 9) -} - -const ( - paramTypeSingle = 0x0100 - paramTypeWildcard = 0x0200 - paramTypeAny = 0x0300 -) - -func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) { - indices := make([]uint64, 0, 1) - for i := 0; i < len(path); i++ { - if da.bc[idx].IsAnyParam() { - indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff)) - } - c := path[i] - if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c { - goto BACKTRACKING - } - } - if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter { - return da.node[da.bc[next].Base()], params, true - } -BACKTRACKING: - for j := len(indices) - 1; j >= 0; j-- { - i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff) - if da.bc[idx].IsSingleParam() { - idx := nextIndex(da.bc[idx].Base(), ParamCharacter) - if idx >= len(da.bc) { - break - } - next := NextSeparator(path, i) - params := append(params, Param{Value: path[i:next]}) - if nd, params, found := da.lookup(path[next:], params, idx); found { - return nd, params, true - } - } - if da.bc[idx].IsWildcardParam() { - idx := nextIndex(da.bc[idx].Base(), WildcardCharacter) - params := append(params, Param{Value: path[i:]}) - return da.node[da.bc[idx].Base()], params, true - } - } - return nil, nil, false -} - -// build builds double-array from records. 
-func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error { - sort.Stable(recordSlice(srcs)) - base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase) - if err != nil { - return err - } - if leaf != nil { - nd, err := makeNode(leaf) - if err != nil { - return err - } - da.bc[idx].SetBase(len(da.node)) - da.node = append(da.node, nd) - } - for _, sib := range siblings { - da.setCheck(nextIndex(base, sib.c), sib.c) - } - for _, sib := range siblings { - records := srcs[sib.start:sib.end] - switch sib.c { - case ParamCharacter: - for _, r := range records { - next := NextSeparator(r.Key, depth+1) - name := r.Key[depth+1 : next] - r.paramNames = append(r.paramNames, name) - r.Key = r.Key[next:] - } - da.bc[idx].SetSingleParam() - if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { - return err - } - case WildcardCharacter: - r := records[0] - name := r.Key[depth+1 : len(r.Key)-1] - r.paramNames = append(r.paramNames, name) - r.Key = "" - da.bc[idx].SetWildcardParam() - if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { - return err - } - default: - if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil { - return err - } - } - } - return nil -} - -// setBase sets BASE. -func (da *doubleArray) setBase(i, base int) { - da.bc[i].SetBase(base) -} - -// setCheck sets CHECK. -func (da *doubleArray) setCheck(i int, check byte) { - da.bc[i].SetCheck(check) -} - -// findEmptyIndex returns an index of unused BASE/CHECK node. -func (da *doubleArray) findEmptyIndex(start int) int { - i := start - for ; i < len(da.bc); i++ { - if da.bc[i].IsEmpty() { - break - } - } - return i -} - -// findBase returns good BASE. -func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) { - for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) { - base = nextIndex(idx, firstChar) - if _, used := usedBase[base]; used { - continue - } - i := 0 - for ; i < len(siblings); i++ { - next := nextIndex(base, siblings[i].c) - if len(da.bc) <= next { - da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...) - } - if !da.bc[next].IsEmpty() { - break - } - } - if i == len(siblings) { - break - } - } - usedBase[base] = struct{}{} - return base -} - -func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) { - siblings, leaf, err = makeSiblings(records, depth) - if err != nil { - return -1, nil, nil, err - } - if len(siblings) < 1 { - return -1, nil, leaf, nil - } - base = da.findBase(siblings, idx, usedBase) - if base > MaxSize { - return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice") - } - da.setBase(idx, base) - return base, siblings, leaf, err -} - -// node represents a node of Double-Array. -type node struct { - data interface{} - - // Names of path parameters. - paramNames []string -} - -// makeNode returns a new node from record. -func makeNode(r *record) (*node, error) { - dups := make(map[string]bool) - for _, name := range r.paramNames { - if dups[name] { - return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key) - } - dups[name] = true - } - return &node{data: r.Value, paramNames: r.paramNames}, nil -} - -// sibling represents an intermediate data of build for Double-Array. -type sibling struct { - // An index of start of duplicated characters. 
- start int - - // An index of end of duplicated characters. - end int - - // A character of sibling. - c byte -} - -// nextIndex returns a next index of array of BASE/CHECK. -func nextIndex(base int, c byte) int { - return base ^ int(c) -} - -// makeSiblings returns slice of sibling. -func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) { - var ( - pc byte - n int - ) - for i, r := range records { - if len(r.Key) <= depth { - leaf = r - continue - } - c := r.Key[depth] - switch { - case pc < c: - sib = append(sib, sibling{start: i, c: c}) - case pc == c: - continue - default: - return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted") - } - if n > 0 { - sib[n-1].end = i - } - pc = c - n++ - } - if n == 0 { - return nil, leaf, nil - } - sib[n-1].end = len(records) - return sib, leaf, nil -} - -// Record represents a record data for router construction. -type Record struct { - // Key for router construction. - Key string - - // Result value for Key. - Value interface{} -} - -// NewRecord returns a new Record. -func NewRecord(key string, value interface{}) Record { - return Record{ - Key: key, - Value: value, - } -} - -// record represents a record that use to build the Double-Array. -type record struct { - Record - paramNames []string -} - -// makeRecords returns the records that use to build Double-Arrays. -func makeRecords(srcs []Record) (statics, params []*record) { - termChar := string(TerminationCharacter) - paramPrefix := string(SeparatorCharacter) + string(ParamCharacter) - wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter) - for _, r := range srcs { - if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) { - r.Key += termChar - params = append(params, &record{Record: r}) - } else { - statics = append(statics, &record{Record: r}) - } - } - return statics, params -} - -// recordSlice represents a slice of Record for sort and implements the sort.Interface. -type recordSlice []*record - -// Len implements the sort.Interface.Len. -func (rs recordSlice) Len() int { - return len(rs) -} - -// Less implements the sort.Interface.Less. -func (rs recordSlice) Less(i, j int) bool { - return rs[i].Key < rs[j].Key -} - -// Swap implements the sort.Interface.Swap. -func (rs recordSlice) Swap(i, j int) { - rs[i], rs[j] = rs[j], rs[i] -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go deleted file mode 100644 index 0886713c..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go +++ /dev/null @@ -1,106 +0,0 @@ -package denco - -import ( - "net/http" -) - -// Mux represents a multiplexer for HTTP request. -type Mux struct{} - -// NewMux returns a new Mux. -func NewMux() *Mux { - return &Mux{} -} - -// GET is shorthand of Mux.Handler("GET", path, handler). -func (m *Mux) GET(path string, handler HandlerFunc) Handler { - return m.Handler("GET", path, handler) -} - -// POST is shorthand of Mux.Handler("POST", path, handler). -func (m *Mux) POST(path string, handler HandlerFunc) Handler { - return m.Handler("POST", path, handler) -} - -// PUT is shorthand of Mux.Handler("PUT", path, handler). -func (m *Mux) PUT(path string, handler HandlerFunc) Handler { - return m.Handler("PUT", path, handler) -} - -// HEAD is shorthand of Mux.Handler("HEAD", path, handler). 
-func (m *Mux) HEAD(path string, handler HandlerFunc) Handler { - return m.Handler("HEAD", path, handler) -} - -// Handler returns a handler for HTTP method. -func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler { - return Handler{ - Method: method, - Path: path, - Func: handler, - } -} - -// Build builds a http.Handler. -func (m *Mux) Build(handlers []Handler) (http.Handler, error) { - recordMap := make(map[string][]Record) - for _, h := range handlers { - recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func)) - } - mux := newServeMux() - for m, records := range recordMap { - router := New() - if err := router.Build(records); err != nil { - return nil, err - } - mux.routers[m] = router - } - return mux, nil -} - -// Handler represents a handler of HTTP request. -type Handler struct { - // Method is an HTTP method. - Method string - - // Path is a routing path for handler. - Path string - - // Func is a function of handler of HTTP request. - Func HandlerFunc -} - -// The HandlerFunc type is aliased to type of handler function. -type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params) - -type serveMux struct { - routers map[string]*Router -} - -func newServeMux() *serveMux { - return &serveMux{ - routers: make(map[string]*Router), - } -} - -// ServeHTTP implements http.Handler interface. -func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - handler, params := mux.handler(r.Method, r.URL.Path) - handler(w, r, params) -} - -func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) { - if router, found := mux.routers[method]; found { - if handler, params, found := router.Lookup(path); found { - return handler.(HandlerFunc), params - } - } - return NotFound, nil -} - -// NotFound replies to the request with an HTTP 404 not found error. -// NotFound is called when unknown HTTP method or a handler not found. -// If you want to use the your own NotFound handler, please overwrite this variable. -var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) { - http.NotFound(w, r) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go deleted file mode 100644 index edc1f6ab..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go +++ /dev/null @@ -1,12 +0,0 @@ -package denco - -// NextSeparator returns an index of next separator in path. -func NextSeparator(path string, start int) int { - for start < len(path) { - if c := path[start]; c == '/' || c == TerminationCharacter { - break - } - start++ - } - return start -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go deleted file mode 100644 index eaf90606..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/*Package middleware provides the library with helper functions for serving swagger APIs. - -Pseudo middleware handler - - import ( - "net/http" - - "github.com/go-openapi/errors" - ) - - func newCompleteMiddleware(ctx *Context) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - // use context to lookup routes - if matched, ok := ctx.RouteInfo(r); ok { - - if matched.NeedsAuth() { - if _, err := ctx.Authorize(r, matched); err != nil { - ctx.Respond(rw, r, matched.Produces, matched, err) - return - } - } - - bound, validation := ctx.BindAndValidate(r, matched) - if validation != nil { - ctx.Respond(rw, r, matched.Produces, matched, validation) - return - } - - result, err := matched.Handler.Handle(bound) - if err != nil { - ctx.Respond(rw, r, matched.Produces, matched, err) - return - } - - ctx.Respond(rw, r, matched.Produces, matched, result) - return - } - - // Not found, check if it exists in the other methods first - if others := ctx.AllowedMethods(r); len(others) > 0 { - ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) - return - } - ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path)) - }) - } -*/ -package middleware diff --git a/vendor/github.com/go-openapi/runtime/middleware/go18.go b/vendor/github.com/go-openapi/runtime/middleware/go18.go deleted file mode 100644 index 75c762c0..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.8 - -package middleware - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go deleted file mode 100644 index 3e342258..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/header/header.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// this file was taken from the github.com/golang/gddo repository - -// Package header provides functions for parsing HTTP headers. -package header - -import ( - "net/http" - "strings" - "time" -) - -// Octet types from RFC 2616. -var octetTypes [256]octetType - -type octetType byte - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// Copy returns a shallow copy of the header. -func Copy(header http.Header) http.Header { - h := make(http.Header) - for k, vs := range header { - h[k] = vs - } - return h -} - -var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC} - -// ParseTime parses the header as time. 
The zero value is returned if the -// header is not present or there is an error parsing the -// header. -func ParseTime(header http.Header, key string) time.Time { - if s := header.Get(key); s != "" { - for _, layout := range timeLayouts { - if t, err := time.Parse(layout, s); err == nil { - return t.UTC() - } - } - } - return time.Time{} -} - -// ParseList parses a comma separated list of values. Commas are ignored in -// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is -// trimmed. -func ParseList(header http.Header, key string) []string { - var result []string - for _, s := range header[http.CanonicalHeaderKey(key)] { - begin := 0 - end := 0 - escape := false - quote := false - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - end = i + 1 - case quote: - switch b { - case '\\': - escape = true - case '"': - quote = false - } - end = i + 1 - case b == '"': - quote = true - end = i + 1 - case octetTypes[b]&isSpace != 0: - if begin == end { - begin = i + 1 - end = begin - } - case b == ',': - if begin < end { - result = append(result, s[begin:end]) - } - begin = i + 1 - end = begin - default: - end = i + 1 - } - } - if begin < end { - result = append(result, s[begin:end]) - } - } - return result -} - -// ParseValueAndParams parses a comma separated list of values with optional -// semicolon separated name-value pairs. Content-Type and Content-Disposition -// headers are in this format. -func ParseValueAndParams(header http.Header, key string) (string, map[string]string) { - return parseValueAndParams(header.Get(key)) -} - -func parseValueAndParams(s string) (value string, params map[string]string) { - params = make(map[string]string) - value, s = expectTokenSlash(s) - if value == "" { - return - } - value = strings.ToLower(value) - s = skipSpace(s) - for strings.HasPrefix(s, ";") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -// AcceptSpec ... -type AcceptSpec struct { - Value string - Q float64 -} - -// ParseAccept2 ... -func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) { - for _, en := range ParseList(header, key) { - v, p := parseValueAndParams(en) - var spec AcceptSpec - spec.Value = v - spec.Q = 1.0 - if p != nil { - if q, ok := p["q"]; ok { - spec.Q, _ = expectQuality(q) - } - } - if spec.Q < 0.0 { - continue - } - specs = append(specs, spec) - } - - return -} - -// ParseAccept parses Accept* headers. 
-func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { -loop: - for _, s := range header[key] { - for { - var spec AcceptSpec - spec.Value, s = expectTokenSlash(s) - if spec.Value == "" { - continue loop - } - spec.Q = 1.0 - s = skipSpace(s) - if strings.HasPrefix(s, ";") { - s = skipSpace(s[1:]) - for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") { - s = skipSpace(s[1:]) - } - if strings.HasPrefix(s, "q=") { - spec.Q, s = expectQuality(s[2:]) - if spec.Q < 0.0 { - continue loop - } - } - } - specs = append(specs, spec) - s = skipSpace(s) - if !strings.HasPrefix(s, ",") { - continue loop - } - s = skipSpace(s[1:]) - } - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenSlash(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - b := s[i] - if (octetTypes[b]&isToken == 0) && b != '/' { - break - } - } - return s[:i], s[i:] -} - -func expectQuality(s string) (q float64, rest string) { - switch { - case len(s) == 0: - return -1, "" - case s[0] == '0': - q = 0 - case s[0] == '1': - q = 1 - default: - return -1, "" - } - s = s[1:] - if !strings.HasPrefix(s, ".") { - return q, s - } - s = s[1:] - i := 0 - n := 0 - d := 1 - for ; i < len(s); i++ { - b := s[i] - if b < '0' || b > '9' { - break - } - n = n*10 + int(b) - '0' - d *= 10 - } - return q + float64(n)/float64(d), s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go deleted file mode 100644 index a9b6f27d..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// this file was taken from the github.com/golang/gddo repository - -package middleware - -import ( - "net/http" - "strings" - - "github.com/go-openapi/runtime/middleware/header" -) - -// NegotiateContentEncoding returns the best offered content encoding for the -// request's Accept-Encoding header. If two offers match with equal weight and -// then the offer earlier in the list is preferred. If no offers are -// acceptable, then "" is returned. 
-func NegotiateContentEncoding(r *http.Request, offers []string) string { - bestOffer := "identity" - bestQ := -1.0 - specs := header.ParseAccept(r.Header, "Accept-Encoding") - for _, offer := range offers { - for _, spec := range specs { - if spec.Q > bestQ && - (spec.Value == "*" || spec.Value == offer) { - bestQ = spec.Q - bestOffer = offer - } - } - } - if bestQ == 0 { - bestOffer = "" - } - return bestOffer -} - -// NegotiateContentType returns the best offered content type for the request's -// Accept header. If two offers match with equal weight, then the more specific -// offer is preferred. For example, text/* trumps */*. If two offers match -// with equal weight and specificity, then the offer earlier in the list is -// preferred. If no offers match, then defaultOffer is returned. -func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string { - bestOffer := defaultOffer - bestQ := -1.0 - bestWild := 3 - specs := header.ParseAccept(r.Header, "Accept") - for _, rawOffer := range offers { - offer := normalizeOffer(rawOffer) - // No Accept header: just return the first offer. - if len(specs) == 0 { - return rawOffer - } - for _, spec := range specs { - switch { - case spec.Q == 0.0: - // ignore - case spec.Q < bestQ: - // better match found - case spec.Value == "*/*": - if spec.Q > bestQ || bestWild > 2 { - bestQ = spec.Q - bestWild = 2 - bestOffer = rawOffer - } - case strings.HasSuffix(spec.Value, "/*"): - if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) && - (spec.Q > bestQ || bestWild > 1) { - bestQ = spec.Q - bestWild = 1 - bestOffer = rawOffer - } - default: - if spec.Value == offer && - (spec.Q > bestQ || bestWild > 0) { - bestQ = spec.Q - bestWild = 0 - bestOffer = rawOffer - } - } - } - } - return bestOffer -} - -func normalizeOffers(orig []string) (norm []string) { - for _, o := range orig { - norm = append(norm, normalizeOffer(o)) - } - return -} - -func normalizeOffer(orig string) string { - return strings.SplitN(orig, ";", 2)[0] -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go deleted file mode 100644 index 466f553d..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package middleware - -import ( - "net/http" - - "github.com/go-openapi/runtime" -) - -type errorResp struct { - code int - response interface{} - headers http.Header -} - -func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { - for k, v := range e.headers { - for _, val := range v { - rw.Header().Add(k, val) - } - } - if e.code > 0 { - rw.WriteHeader(e.code) - } else { - rw.WriteHeader(http.StatusInternalServerError) - } - if err := producer.Produce(rw, e.response); err != nil { - panic(err) - } -} - -// NotImplemented the error response when the response is not implemented -func NotImplemented(message string) Responder { - return &errorResp{http.StatusNotImplemented, message, make(http.Header)} -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go deleted file mode 100644 index 1175a63c..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/operation.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import "net/http" - -// NewOperationExecutor creates a context aware middleware that handles the operations after routing -func NewOperationExecutor(ctx *Context) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - // use context to lookup routes - route, rCtx, _ := ctx.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - - route.Handler.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go deleted file mode 100644 index 2088605f..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/parameter.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package middleware - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" - - "github.com/go-openapi/runtime" -) - -const defaultMaxMemory = 32 << 20 - -var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() - -func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder { - binder := new(untypedParamBinder) - binder.Name = param.Name - binder.parameter = &param - binder.formats = formats - if param.In != "body" { - binder.validator = validate.NewParamValidator(&param, formats) - } else { - binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats) - } - - return binder -} - -type untypedParamBinder struct { - parameter *spec.Parameter - formats strfmt.Registry - Name string - validator validate.EntityValidator -} - -func (p *untypedParamBinder) Type() reflect.Type { - return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items) -} - -func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type { - switch tpe { - case "boolean": - return reflect.TypeOf(true) - - case "string": - if tt, ok := p.formats.GetType(format); ok { - return tt - } - return reflect.TypeOf("") - - case "integer": - switch format { - case "int8": - return reflect.TypeOf(int8(0)) - case "int16": - return reflect.TypeOf(int16(0)) - case "int32": - return reflect.TypeOf(int32(0)) - case "int64": - return reflect.TypeOf(int64(0)) - default: - return reflect.TypeOf(int64(0)) - } - - case "number": - switch format { - case "float": - return reflect.TypeOf(float32(0)) - case "double": - return reflect.TypeOf(float64(0)) - } - - case "array": - if items == nil { - return nil - } - itemsType := p.typeForSchema(items.Type, items.Format, items.Items) - if itemsType == nil { - return nil - } - return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type() - - case "file": - return reflect.TypeOf(&runtime.File{}).Elem() - - case "object": - return reflect.TypeOf(map[string]interface{}{}) - } - return nil -} - -func (p *untypedParamBinder) allowsMulti() bool { - return p.parameter.In == "query" || p.parameter.In == "formData" -} - -func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) { - name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type - if tpe == "array" { - if cf == "multi" { - if !p.allowsMulti() { - return nil, false, false, errors.InvalidCollectionFormat(name, in, cf) - } - vv, hasKey, _ := values.GetOK(name) - return vv, false, hasKey, nil - } - - v, hk, hv := values.GetOK(name) - if !hv { - return nil, false, hk, nil - } - d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target) - return d, c, hk, e - } - - vv, hk, _ := values.GetOK(name) - return vv, false, hk, nil -} - -func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error { - // fmt.Println("binding", p.name, "as", p.Type()) - switch p.parameter.In { - case "query": - data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target) - if err != nil { - return err - } - if custom { - return nil - } - - return p.bindValue(data, hasKey, target) - - case "header": - data, custom, hasKey, err := 
p.readValue(runtime.Values(request.Header), target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "path": - data, custom, hasKey, err := p.readValue(routeParams, target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "formData": - var err error - var mt string - - mt, _, e := runtime.ContentType(request.Header) - if e != nil { - // because of the interface conversion go thinks the error is not nil - // so we first check for nil and then set the err var if it's not nil - err = e - } - - if err != nil { - return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"}) - } - - if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" { - return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"}) - } - - if mt == "multipart/form-data" { - if err = request.ParseMultipartForm(defaultMaxMemory); err != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", err) - } - } - - if err = request.ParseForm(); err != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", err) - } - - if p.parameter.Type == "file" { - file, header, ffErr := request.FormFile(p.parameter.Name) - if ffErr != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) - } - target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) - return nil - } - - if request.MultipartForm != nil { - data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target) - if rvErr != nil { - return rvErr - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - } - data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "body": - newValue := reflect.New(target.Type()) - if !runtime.HasBody(request) { - if p.parameter.Default != nil { - target.Set(reflect.ValueOf(p.parameter.Default)) - } - - return nil - } - if err := consumer.Consume(request.Body, newValue.Interface()); err != nil { - if err == io.EOF && p.parameter.Default != nil { - target.Set(reflect.ValueOf(p.parameter.Default)) - return nil - } - tpe := p.parameter.Type - if p.parameter.Format != "" { - tpe = p.parameter.Format - } - return errors.InvalidType(p.Name, p.parameter.In, tpe, nil) - } - target.Set(reflect.Indirect(newValue)) - return nil - default: - return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In)) - } -} - -func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error { - if p.parameter.Type == "array" { - return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey) - } - var d string - if len(data) > 0 { - d = data[len(data)-1] - } - return p.setFieldValue(target, p.parameter.Default, d, hasKey) -} - -func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { - tpe := p.parameter.Type - if p.parameter.Format != "" { - tpe = p.parameter.Format - } - - if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil { - return errors.Required(p.Name, p.parameter.In) - } - - ok, err := p.tryUnmarshaler(target, defaultValue, data) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, 
data) - } - if ok { - return nil - } - - defVal := reflect.Zero(target.Type()) - if defaultValue != nil { - defVal = reflect.ValueOf(defaultValue) - } - - if tpe == "byte" { - if data == "" { - if target.CanSet() { - target.SetBytes(defVal.Bytes()) - } - return nil - } - - b, err := base64.StdEncoding.DecodeString(data) - if err != nil { - b, err = base64.URLEncoding.DecodeString(data) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - } - if target.CanSet() { - target.SetBytes(b) - } - return nil - } - - switch target.Kind() { - case reflect.Bool: - if data == "" { - if target.CanSet() { - target.SetBool(defVal.Bool()) - } - return nil - } - b, err := swag.ConvertBool(data) - if err != nil { - return err - } - if target.CanSet() { - target.SetBool(b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(int64(0))) - target.SetInt(rd.Int()) - } - return nil - } - i, err := strconv.ParseInt(data, 10, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowInt(i) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetInt(i) - } - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(uint64(0))) - target.SetUint(rd.Uint()) - } - return nil - } - u, err := strconv.ParseUint(data, 10, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowUint(u) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetUint(u) - } - - case reflect.Float32, reflect.Float64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(float64(0))) - target.SetFloat(rd.Float()) - } - return nil - } - f, err := strconv.ParseFloat(data, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowFloat(f) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetFloat(f) - } - - case reflect.String: - value := data - if value == "" { - value = defVal.String() - } - // validate string - if target.CanSet() { - target.SetString(value) - } - - case reflect.Ptr: - if data == "" && defVal.Kind() == reflect.Ptr { - if target.CanSet() { - target.Set(defVal) - } - return nil - } - newVal := reflect.New(target.Type().Elem()) - if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil { - return err - } - if target.CanSet() { - target.Set(newVal) - } - - default: - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - return nil -} - -func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) { - if !target.CanSet() { - return false, nil - } - // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more - if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) { - if defaultValue != nil && len(data) == 0 { - target.Set(reflect.ValueOf(defaultValue)) - return true, nil - } - value := reflect.New(target.Type()) - if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil { - return true, err - } - target.Set(reflect.Indirect(value)) - return true, nil - } - return false, nil -} - -func (p 
*untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) { - ok, err := p.tryUnmarshaler(target, p.parameter.Default, data) - if err != nil { - return nil, true, err - } - if ok { - return nil, true, nil - } - - return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil -} - -func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error { - sz := len(data) - if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil { - return errors.Required(p.Name, p.parameter.In) - } - - defVal := reflect.Zero(target.Type()) - if defaultValue != nil { - defVal = reflect.ValueOf(defaultValue) - } - - if !target.CanSet() { - return nil - } - if sz == 0 { - target.Set(defVal) - return nil - } - - value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz) - - for i := 0; i < sz; i++ { - if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil { - return err - } - } - - target.Set(value) - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go deleted file mode 100644 index 03385251..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !go1.8 - -package middleware - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go deleted file mode 100644 index 019c8542..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/redoc.go +++ /dev/null @@ -1,103 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "html/template" - "net/http" - "path" -) - -// RedocOpts configures the Redoc middlewares -type RedocOpts struct { - // BasePath for the UI path, defaults to: / - BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs - Path string - // SpecURL the url to find the spec for - SpecURL string - // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js - RedocURL string - // Title for the documentation site, default to: API documentation - Title string -} - -// EnsureDefaults in case some options are missing -func (r *RedocOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } - if r.RedocURL == "" { - r.RedocURL = redocLatest - } - if r.Title == "" { - r.Title = "API documentation" - } -} - -// Redoc creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. 
-// -func Redoc(opts RedocOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("redoc").Parse(redocTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js" - redocTemplate = ` - - - {{ .Title }} - - - - - - - - - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go deleted file mode 100644 index 4e1c0ab5..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/request.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "net/http" - "reflect" - - "github.com/go-openapi/errors" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// RequestBinder binds and validates the data from a http request -type untypedRequestBinder struct { - Spec *spec.Swagger - Parameters map[string]spec.Parameter - Formats strfmt.Registry - paramBinders map[string]*untypedParamBinder -} - -// NewRequestBinder creates a new binder for reading a request. 
-func newUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedRequestBinder { - binders := make(map[string]*untypedParamBinder) - for fieldName, param := range parameters { - binders[fieldName] = newUntypedParamBinder(param, spec, formats) - } - return &untypedRequestBinder{ - Parameters: parameters, - paramBinders: binders, - Spec: spec, - Formats: formats, - } -} - -// Bind perform the databinding and validation -func (o *untypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error { - val := reflect.Indirect(reflect.ValueOf(data)) - isMap := val.Kind() == reflect.Map - var result []error - debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) - for fieldName, param := range o.Parameters { - binder := o.paramBinders[fieldName] - debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) - var target reflect.Value - if !isMap { - binder.Name = fieldName - target = val.FieldByName(fieldName) - } - - if isMap { - tpe := binder.Type() - if tpe == nil { - if param.Schema.Type.Contains("array") { - tpe = reflect.TypeOf([]interface{}{}) - } else { - tpe = reflect.TypeOf(map[string]interface{}{}) - } - } - target = reflect.Indirect(reflect.New(tpe)) - } - - if !target.IsValid() { - result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name)) - continue - } - - if err := binder.Bind(request, routeParams, consumer, target); err != nil { - result = append(result, err) - continue - } - - if binder.validator != nil { - rr := binder.validator.Validate(target.Interface()) - if rr != nil && rr.HasErrors() { - result = append(result, rr.AsError()) - } - } - - if isMap { - val.SetMapIndex(reflect.ValueOf(param.Name), target) - } - } - - if len(result) > 0 { - return errors.CompositeValidationError(result...) - } - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go deleted file mode 100644 index 6d797e82..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/router.go +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "fmt" - "net/http" - fpath "path" - "regexp" - "strings" - - "github.com/go-openapi/runtime/security" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/middleware/denco" -) - -// RouteParam is a object to capture route params in a framework agnostic way. 
-// implementations of the muxer should use these route params to communicate with the -// swagger framework -type RouteParam struct { - Name string - Value string -} - -// RouteParams the collection of route params -type RouteParams []RouteParam - -// Get gets the value for the route param for the specified key -func (r RouteParams) Get(name string) string { - vv, _, _ := r.GetOK(name) - if len(vv) > 0 { - return vv[len(vv)-1] - } - return "" -} - -// GetOK gets the value but also returns booleans to indicate if a key or value -// is present. This aids in validation and satisfies an interface in use there -// -// The returned values are: data, has key, has value -func (r RouteParams) GetOK(name string) ([]string, bool, bool) { - for _, p := range r { - if p.Name == name { - return []string{p.Value}, true, p.Value != "" - } - } - return nil, false, false -} - -// NewRouter creates a new context aware router middleware -func NewRouter(ctx *Context, next http.Handler) http.Handler { - if ctx.router == nil { - ctx.router = DefaultRouter(ctx.spec, ctx.api) - } - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if _, rCtx, ok := ctx.RouteInfo(r); ok { - next.ServeHTTP(rw, rCtx) - return - } - - // Not found, check if it exists in the other methods first - if others := ctx.AllowedMethods(r); len(others) > 0 { - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) - return - } - - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) - }) -} - -// RoutableAPI represents an interface for things that can serve -// as a provider of implementations for the swagger router -type RoutableAPI interface { - HandlerFor(string, string) (http.Handler, bool) - ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) - ConsumersFor([]string) map[string]runtime.Consumer - ProducersFor([]string) map[string]runtime.Producer - AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator - Authorizer() runtime.Authorizer - Formats() strfmt.Registry - DefaultProduces() string - DefaultConsumes() string -} - -// Router represents a swagger aware router -type Router interface { - Lookup(method, path string) (*MatchedRoute, bool) - OtherMethods(method, path string) []string -} - -type defaultRouteBuilder struct { - spec *loads.Document - analyzer *analysis.Spec - api RoutableAPI - records map[string][]denco.Record -} - -type defaultRouter struct { - spec *loads.Document - routers map[string]*denco.Router -} - -func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder { - return &defaultRouteBuilder{ - spec: spec, - analyzer: analysis.New(spec.Spec()), - api: api, - records: make(map[string][]denco.Record), - } -} - -// DefaultRouter creates a default implemenation of the router -func DefaultRouter(spec *loads.Document, api RoutableAPI) Router { - builder := newDefaultRouteBuilder(spec, api) - if spec != nil { - for method, paths := range builder.analyzer.Operations() { - for path, operation := range paths { - fp := fpath.Join(spec.BasePath(), path) - debugLog("adding route %s %s %q", method, fp, operation.ID) - builder.AddRoute(method, fp, operation) - } - } - } - return builder.Build() -} - -// RouteAuthenticator is an authenticator that can compose several authenticators together. -// It also knows when it contains an authenticator that allows for anonymous pass through. 
-// Contains a group of 1 or more authenticators that have a logical AND relationship -type RouteAuthenticator struct { - Authenticator map[string]runtime.Authenticator - Schemes []string - Scopes map[string][]string - allScopes []string - commonScopes []string - allowAnonymous bool -} - -func (ra *RouteAuthenticator) AllowsAnonymous() bool { - return ra.allowAnonymous -} - -// AllScopes returns a list of unique scopes that is the combination -// of all the scopes in the requirements -func (ra *RouteAuthenticator) AllScopes() []string { - return ra.allScopes -} - -// CommonScopes returns a list of unique scopes that are common in all the -// scopes in the requirements -func (ra *RouteAuthenticator) CommonScopes() []string { - return ra.commonScopes -} - -// Authenticate Authenticator interface implementation -func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { - if ra.allowAnonymous { - route.Authenticator = ra - return true, nil, nil - } - // iterate in proper order - var lastResult interface{} - for _, scheme := range ra.Schemes { - if authenticator, ok := ra.Authenticator[scheme]; ok { - applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ - Request: req, - RequiredScopes: ra.Scopes[scheme], - }) - if !applies { - return false, nil, nil - } - if err != nil { - route.Authenticator = ra - return true, nil, err - } - lastResult = princ - } - } - route.Authenticator = ra - return true, lastResult, nil -} - -func stringSliceUnion(slices ...[]string) []string { - unique := make(map[string]struct{}) - var result []string - for _, slice := range slices { - for _, entry := range slice { - if _, ok := unique[entry]; ok { - continue - } - unique[entry] = struct{}{} - result = append(result, entry) - } - } - return result -} - -func stringSliceIntersection(slices ...[]string) []string { - unique := make(map[string]int) - var intersection []string - - total := len(slices) - var emptyCnt int - for _, slice := range slices { - if len(slice) == 0 { - emptyCnt++ - continue - } - - for _, entry := range slice { - unique[entry]++ - if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices - intersection = append(intersection, entry) - } - } - } - - return intersection -} - -// RouteAuthenticators represents a group of authenticators that represent a logical OR -type RouteAuthenticators []RouteAuthenticator - -// AllowsAnonymous returns true when there is an authenticator that means optional auth -func (ras RouteAuthenticators) AllowsAnonymous() bool { - for _, ra := range ras { - if ra.AllowsAnonymous() { - return true - } - } - return false -} - -// Authenticate method implemention so this collection can be used as authenticator -func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { - var lastError error - var allowsAnon bool - var anonAuth RouteAuthenticator - - for _, ra := range ras { - if ra.AllowsAnonymous() { - anonAuth = ra - allowsAnon = true - continue - } - applies, usr, err := ra.Authenticate(req, route) - if !applies || err != nil || usr == nil { - if err != nil { - lastError = err - } - continue - } - return applies, usr, nil - } - - if allowsAnon && lastError == nil { - route.Authenticator = &anonAuth - return true, nil, lastError - } - return lastError != nil, nil, lastError -} - -type routeEntry struct { - PathPattern string - BasePath string - Operation *spec.Operation - Consumes []string - Consumers 
map[string]runtime.Consumer - Produces []string - Producers map[string]runtime.Producer - Parameters map[string]spec.Parameter - Handler http.Handler - Formats strfmt.Registry - Binder *untypedRequestBinder - Authenticators RouteAuthenticators - Authorizer runtime.Authorizer -} - -// MatchedRoute represents the route that was matched in this request -type MatchedRoute struct { - routeEntry - Params RouteParams - Consumer runtime.Consumer - Producer runtime.Producer - Authenticator *RouteAuthenticator -} - -// HasAuth returns true when the route has a security requirement defined -func (m *MatchedRoute) HasAuth() bool { - return len(m.Authenticators) > 0 -} - -// NeedsAuth returns true when the request still -// needs to perform authentication -func (m *MatchedRoute) NeedsAuth() bool { - return m.HasAuth() && m.Authenticator == nil -} - -func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { - mth := strings.ToUpper(method) - debugLog("looking up route for %s %s", method, path) - if Debug { - if len(d.routers) == 0 { - debugLog("there are no known routers") - } - for meth := range d.routers { - debugLog("got a router for %s", meth) - } - } - if router, ok := d.routers[mth]; ok { - if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { - if entry, ok := m.(*routeEntry); ok { - debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) - var params RouteParams - for _, p := range rp { - v, err := pathUnescape(p.Value) - if err != nil { - debugLog("failed to escape %q: %v", p.Value, err) - v = p.Value - } - // a workaround to handle fragment/composing parameters until they are supported in denco router - // check if this parameter is a fragment within a path segment - if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { - // extract fragment parameters - ep := strings.Split(entry.PathPattern[xpos:], "/")[0] - pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) - for i, pname := range pnames { - params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) - } - } else { - // use the parameter directly - params = append(params, RouteParam{Name: p.Name, Value: v}) - } - } - return &MatchedRoute{routeEntry: *entry, Params: params}, true - } - } else { - debugLog("couldn't find a route by path for %s %s", method, path) - } - } else { - debugLog("couldn't find a route by method for %s %s", method, path) - } - return nil, false -} - -func (d *defaultRouter) OtherMethods(method, path string) []string { - mn := strings.ToUpper(method) - var methods []string - for k, v := range d.routers { - if k != mn { - if _, _, ok := v.Lookup(fpath.Clean(path)); ok { - methods = append(methods, k) - continue - } - } - } - return methods -} - -// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco -var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) - -func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { - pleft := strings.Index(pattern, "{") - names = append(names, name) - if pleft < 0 { - if strings.HasSuffix(value, pattern) { - values = append(values, value[:len(value)-len(pattern)]) - } else { - values = append(values, "") - } - } else { - toskip := pattern[:pleft] - pright := strings.Index(pattern, "}") - vright := strings.Index(value, toskip) - if vright >= 0 { - values = 
append(values, value[:vright]) - } else { - values = append(values, "") - value = "" - } - return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) - } - return names, values -} - -func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { - mn := strings.ToUpper(method) - - bp := fpath.Clean(d.spec.BasePath()) - if len(bp) > 0 && bp[len(bp)-1] == '/' { - bp = bp[:len(bp)-1] - } - - debugLog("operation: %#v", *operation) - if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { - consumes := d.analyzer.ConsumesFor(operation) - produces := d.analyzer.ProducesFor(operation) - parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) - - record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ - BasePath: bp, - PathPattern: path, - Operation: operation, - Handler: handler, - Consumes: consumes, - Produces: produces, - Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), - Producers: d.api.ProducersFor(normalizeOffers(produces)), - Parameters: parameters, - Formats: d.api.Formats(), - Binder: newUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), - Authenticators: d.buildAuthenticators(operation), - Authorizer: d.api.Authorizer(), - }) - d.records[mn] = append(d.records[mn], record) - } -} - -func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { - requirements := d.analyzer.SecurityRequirementsFor(operation) - var auths []RouteAuthenticator - for _, reqs := range requirements { - var schemes []string - scopes := make(map[string][]string, len(reqs)) - var scopeSlices [][]string - for _, req := range reqs { - schemes = append(schemes, req.Name) - scopes[req.Name] = req.Scopes - scopeSlices = append(scopeSlices, req.Scopes) - } - - definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) - authenticators := d.api.AuthenticatorsFor(definitions) - auths = append(auths, RouteAuthenticator{ - Authenticator: authenticators, - Schemes: schemes, - Scopes: scopes, - allScopes: stringSliceUnion(scopeSlices...), - commonScopes: stringSliceIntersection(scopeSlices...), - allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", - }) - } - return auths -} - -func (d *defaultRouteBuilder) Build() *defaultRouter { - routers := make(map[string]*denco.Router) - for method, records := range d.records { - router := denco.New() - _ = router.Build(records) - routers[method] = router - } - return &defaultRouter{ - spec: d.spec, - routers: routers, - } -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go deleted file mode 100644 index 2b061cae..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/security.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package middleware - -import "net/http" - -func newSecureAPI(ctx *Context, next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - route, rCtx, _ := ctx.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - if route != nil && !route.NeedsAuth() { - next.ServeHTTP(rw, r) - return - } - - _, rCtx, err := ctx.Authorize(r, route) - if err != nil { - ctx.Respond(rw, r, route.Produces, route, err) - return - } - r = rCtx - - next.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go deleted file mode 100644 index f0291429..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/spec.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "net/http" - "path" -) - -// Spec creates a middleware to serve a swagger spec. -// This allows for altering the spec before starting the http listener. -// This can be useful if you want to serve the swagger spec from another path than /swagger.json -// -func Spec(basePath string, b []byte, next http.Handler) http.Handler { - if basePath == "" { - basePath = "/" - } - pth := path.Join(basePath, "swagger.json") - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusOK) - //#nosec - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusNotFound) - return - } - next.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go deleted file mode 100644 index 39a85f7d..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package untyped - -import ( - "fmt" - "net/http" - "sort" - "strings" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// NewAPI creates the default untyped API -func NewAPI(spec *loads.Document) *API { - var an *analysis.Spec - if spec != nil && spec.Spec() != nil { - an = analysis.New(spec.Spec()) - } - api := &API{ - spec: spec, - analyzer: an, - consumers: make(map[string]runtime.Consumer, 10), - producers: make(map[string]runtime.Producer, 10), - authenticators: make(map[string]runtime.Authenticator), - operations: make(map[string]map[string]runtime.OperationHandler), - ServeError: errors.ServeError, - Models: make(map[string]func() interface{}), - formats: strfmt.NewFormats(), - } - return api.WithJSONDefaults() -} - -// API represents an untyped mux for a swagger spec -type API struct { - spec *loads.Document - analyzer *analysis.Spec - DefaultProduces string - DefaultConsumes string - consumers map[string]runtime.Consumer - producers map[string]runtime.Producer - authenticators map[string]runtime.Authenticator - authorizer runtime.Authorizer - operations map[string]map[string]runtime.OperationHandler - ServeError func(http.ResponseWriter, *http.Request, error) - Models map[string]func() interface{} - formats strfmt.Registry -} - -// WithJSONDefaults loads the json defaults for this api -func (d *API) WithJSONDefaults() *API { - d.DefaultConsumes = runtime.JSONMime - d.DefaultProduces = runtime.JSONMime - d.consumers[runtime.JSONMime] = runtime.JSONConsumer() - d.producers[runtime.JSONMime] = runtime.JSONProducer() - return d -} - -// WithoutJSONDefaults clears the json defaults for this api -func (d *API) WithoutJSONDefaults() *API { - d.DefaultConsumes = "" - d.DefaultProduces = "" - delete(d.consumers, runtime.JSONMime) - delete(d.producers, runtime.JSONMime) - return d -} - -// Formats returns the registered string formats -func (d *API) Formats() strfmt.Registry { - if d.formats == nil { - d.formats = strfmt.NewFormats() - } - return d.formats -} - -// RegisterFormat registers a custom format validator -func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { - if d.formats == nil { - d.formats = strfmt.NewFormats() - } - d.formats.Add(name, format, validator) -} - -// RegisterAuth registers an auth handler in this api -func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { - if d.authenticators == nil { - d.authenticators = make(map[string]runtime.Authenticator) - } - d.authenticators[scheme] = handler -} - -// RegisterAuthorizer registers an authorizer handler in this api -func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { - d.authorizer = handler -} - -// RegisterConsumer registers a consumer for a media type. 
-func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { - if d.consumers == nil { - d.consumers = make(map[string]runtime.Consumer, 10) - } - d.consumers[strings.ToLower(mediaType)] = handler -} - -// RegisterProducer registers a producer for a media type -func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { - if d.producers == nil { - d.producers = make(map[string]runtime.Producer, 10) - } - d.producers[strings.ToLower(mediaType)] = handler -} - -// RegisterOperation registers an operation handler for an operation name -func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { - if d.operations == nil { - d.operations = make(map[string]map[string]runtime.OperationHandler, 30) - } - um := strings.ToUpper(method) - if b, ok := d.operations[um]; !ok || b == nil { - d.operations[um] = make(map[string]runtime.OperationHandler) - } - d.operations[um][path] = handler -} - -// OperationHandlerFor returns the operation handler for the specified id if it can be found -func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { - if d.operations == nil { - return nil, false - } - if pi, ok := d.operations[strings.ToUpper(method)]; ok { - h, ok := pi[path] - return h, ok - } - return nil, false -} - -// ConsumersFor gets the consumers for the specified media types -func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { - result := make(map[string]runtime.Consumer) - for _, mt := range mediaTypes { - if consumer, ok := d.consumers[mt]; ok { - result[mt] = consumer - } - } - return result -} - -// ProducersFor gets the producers for the specified media types -func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { - result := make(map[string]runtime.Producer) - for _, mt := range mediaTypes { - if producer, ok := d.producers[mt]; ok { - result[mt] = producer - } - } - return result -} - -// AuthenticatorsFor gets the authenticators for the specified security schemes -func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { - result := make(map[string]runtime.Authenticator) - for k := range schemes { - if a, ok := d.authenticators[k]; ok { - result[k] = a - } - } - return result -} - -// Authorizer returns the registered authorizer -func (d *API) Authorizer() runtime.Authorizer { - return d.authorizer -} - -// Validate validates this API for any missing items -func (d *API) Validate() error { - return d.validate() -} - -// validateWith validates the registrations in this API against the provided spec analyzer -func (d *API) validate() error { - var consumes []string - for k := range d.consumers { - consumes = append(consumes, k) - } - - var produces []string - for k := range d.producers { - produces = append(produces, k) - } - - var authenticators []string - for k := range d.authenticators { - authenticators = append(authenticators, k) - } - - var operations []string - for m, v := range d.operations { - for p := range v { - operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) - } - } - - var definedAuths []string - for k := range d.spec.Spec().SecurityDefinitions { - definedAuths = append(definedAuths, k) - } - - if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil { - return err - } - if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { - return err - } - if err := d.verify("operation", operations, 
d.analyzer.OperationMethodPaths()); err != nil { - return err - } - - requiredAuths := d.analyzer.RequiredSecuritySchemes() - if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { - return err - } - if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { - return err - } - return nil -} - -func (d *API) verify(name string, registrations []string, expectations []string) error { - sort.Strings(registrations) - sort.Strings(expectations) - - expected := map[string]struct{}{} - seen := map[string]struct{}{} - - for _, v := range expectations { - expected[v] = struct{}{} - } - - var unspecified []string - for _, v := range registrations { - seen[v] = struct{}{} - if _, ok := expected[v]; !ok { - unspecified = append(unspecified, v) - } - } - - for k := range seen { - delete(expected, k) - } - - var unregistered []string - for k := range expected { - unregistered = append(unregistered, k) - } - sort.Strings(unspecified) - sort.Strings(unregistered) - - if len(unregistered) > 0 || len(unspecified) > 0 { - return &errors.APIVerificationFailed{ - Section: name, - MissingSpecification: unspecified, - MissingRegistration: unregistered, - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go deleted file mode 100644 index 9505c66c..00000000 --- a/vendor/github.com/go-openapi/runtime/middleware/validation.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package middleware - -import ( - "mime" - "net/http" - "strings" - - "github.com/go-openapi/errors" - "github.com/go-openapi/swag" - - "github.com/go-openapi/runtime" -) - -type validation struct { - context *Context - result []error - request *http.Request - route *MatchedRoute - bound map[string]interface{} -} - -// ContentType validates the content type of a request -func validateContentType(allowed []string, actual string) error { - debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", ")) - if len(allowed) == 0 { - return nil - } - mt, _, err := mime.ParseMediaType(actual) - if err != nil { - return errors.InvalidContentType(actual, allowed) - } - if swag.ContainsStringsCI(allowed, mt) { - return nil - } - if swag.ContainsStringsCI(allowed, "*/*") { - return nil - } - parts := strings.Split(actual, "/") - if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") { - return nil - } - return errors.InvalidContentType(actual, allowed) -} - -func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { - debugLog("validating request %s %s", request.Method, request.URL.EscapedPath()) - validate := &validation{ - context: ctx, - request: request, - route: route, - bound: make(map[string]interface{}), - } - - validate.contentType() - if len(validate.result) == 0 { - validate.responseFormat() - } - if len(validate.result) == 0 { - validate.parameters() - } - - return validate -} - -func (v *validation) parameters() { - debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) - if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { - if result.Error() == "validation failure list" { - for _, e := range result.(*errors.Validation).Value.([]interface{}) { - v.result = append(v.result, e.(error)) - } - return - } - v.result = append(v.result, result) - } -} - -func (v *validation) contentType() { - if len(v.result) == 0 && runtime.HasBody(v.request) { - debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) - ct, _, req, err := v.context.ContentType(v.request) - if err != nil { - v.result = append(v.result, err) - } else { - v.request = req - } - - if len(v.result) == 0 { - if err := validateContentType(v.route.Consumes, ct); err != nil { - v.result = append(v.result, err) - } - } - if ct != "" && v.route.Consumer == nil { - cons, ok := v.route.Consumers[ct] - if !ok { - v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct)) - } else { - v.route.Consumer = cons - } - } - } -} - -func (v *validation) responseFormat() { - if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" { - v.request = rCtx - v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) - } -} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go deleted file mode 100644 index 9e51b42b..00000000 --- a/vendor/github.com/go-openapi/runtime/request.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bufio" - "io" - "net/http" - "strings" - - "github.com/go-openapi/swag" -) - -// CanHaveBody returns true if this method can have a body -func CanHaveBody(method string) bool { - mn := strings.ToUpper(method) - return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" -} - -// IsSafe returns true if this is a request with a safe method -func IsSafe(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn == "GET" || mn == "HEAD" -} - -// AllowsBody returns true if the request allows for a body -func AllowsBody(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn != "HEAD" -} - -// HasBody returns true if this method needs a content-type -func HasBody(r *http.Request) bool { - // happy case: we have a content length set - if r.ContentLength > 0 { - return true - } - - if r.Header.Get(http.CanonicalHeaderKey("content-length")) != "" { - // in this case, no Transfer-Encoding should be present - // we have a header set but it was explicitly set to 0, so we assume no body - return false - } - - rdr := newPeekingReader(r.Body) - r.Body = rdr - return rdr.HasContent() -} - -func newPeekingReader(r io.ReadCloser) *peekingReader { - if r == nil { - return nil - } - return &peekingReader{ - underlying: bufio.NewReader(r), - orig: r, - } -} - -type peekingReader struct { - underlying interface { - Buffered() int - Peek(int) ([]byte, error) - Read([]byte) (int, error) - } - orig io.ReadCloser -} - -func (p *peekingReader) HasContent() bool { - if p == nil { - return false - } - if p.underlying.Buffered() > 0 { - return true - } - b, err := p.underlying.Peek(1) - if err != nil { - return false - } - return len(b) > 0 -} - -func (p *peekingReader) Read(d []byte) (int, error) { - if p == nil { - return 0, io.EOF - } - return p.underlying.Read(d) -} - -func (p *peekingReader) Close() error { - p.underlying = nil - if p.orig != nil { - return p.orig.Close() - } - return nil -} - -// JSONRequest creates a new http request with json headers set -func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - req.Header.Add(HeaderContentType, JSONMime) - req.Header.Add(HeaderAccept, JSONMime) - return req, nil -} - -// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) -type Gettable interface { - GetOK(string) ([]string, bool, bool) -} - -// ReadSingleValue reads a single value from the source -func ReadSingleValue(values Gettable, name string) string { - vv, _, hv := values.GetOK(name) - if hv { - return vv[len(vv)-1] - } - return "" -} - -// ReadCollectionValue reads a collection value from a string data source -func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { - v := ReadSingleValue(values, name) - return swag.SplitByFormat(v, collectionFormat) -} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go deleted file mode 100644 index 
476d26c3..00000000 --- a/vendor/github.com/go-openapi/runtime/security/authenticator.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package security - -import ( - "context" - "net/http" - "strings" - - "github.com/go-openapi/errors" - - "github.com/go-openapi/runtime" -) - -const ( - query = "query" - header = "header" -) - -// HttpAuthenticator is a function that authenticates a HTTP request -func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { - return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { - if request, ok := params.(*http.Request); ok { - return handler(request) - } - if scoped, ok := params.(*ScopedAuthRequest); ok { - return handler(scoped.Request) - } - return false, nil, nil - }) -} - -// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes -func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator { - return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { - if request, ok := params.(*ScopedAuthRequest); ok { - return handler(request) - } - return false, nil, nil - }) -} - -// UserPassAuthentication authentication function -type UserPassAuthentication func(string, string) (interface{}, error) - -// UserPassAuthenticationCtx authentication function with context.Context -type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error) - -// TokenAuthentication authentication function -type TokenAuthentication func(string) (interface{}, error) - -// TokenAuthenticationCtx authentication function with context.Context -type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error) - -// ScopedTokenAuthentication authentication function -type ScopedTokenAuthentication func(string, []string) (interface{}, error) - -// ScopedTokenAuthenticationCtx authentication function with context.Context -type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error) - -var DefaultRealmName = "API" - -type secCtxKey uint8 - -const ( - failedBasicAuth secCtxKey = iota - oauth2SchemeName -) - -func FailedBasicAuth(r *http.Request) string { - return FailedBasicAuthCtx(r.Context()) -} - -func FailedBasicAuthCtx(ctx context.Context) string { - v, ok := ctx.Value(failedBasicAuth).(string) - if !ok { - return "" - } - return v -} - -func OAuth2SchemeName(r *http.Request) string { - return OAuth2SchemeNameCtx(r.Context()) -} - -func OAuth2SchemeNameCtx(ctx context.Context) string { - v, ok := ctx.Value(oauth2SchemeName).(string) - if !ok { - return "" - } - return v -} - -// BasicAuth creates a basic auth authenticator with the provided authentication function -func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { - return BasicAuthRealm(DefaultRealmName, 
authenticate) -} - -// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name -func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { - if realm == "" { - realm = DefaultRealmName - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - if usr, pass, ok := r.BasicAuth(); ok { - p, err := authenticate(usr, pass) - if err != nil { - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - } - return true, p, err - } - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - return false, nil, nil - }) -} - -// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context -func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { - return BasicAuthRealmCtx(DefaultRealmName, authenticate) -} - -// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context -func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { - if realm == "" { - realm = DefaultRealmName - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - if usr, pass, ok := r.BasicAuth(); ok { - ctx, p, err := authenticate(r.Context(), usr, pass) - if err != nil { - ctx = context.WithValue(ctx, failedBasicAuth, realm) - } - *r = *r.WithContext(ctx) - return true, p, err - } - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - return false, nil, nil - }) -} - -// APIKeyAuth creates an authenticator that uses a token for authorization. -// This token can be obtained from either a header or a query string -func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { - inl := strings.ToLower(in) - if inl != query && inl != header { - // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) - } - - var getToken func(*http.Request) string - switch inl { - case header: - getToken = func(r *http.Request) string { return r.Header.Get(name) } - case query: - getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - token := getToken(r) - if token == "" { - return false, nil, nil - } - - p, err := authenticate(token) - return true, p, err - }) -} - -// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. 
-// This token can be obtained from either a header or a query string -func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { - inl := strings.ToLower(in) - if inl != query && inl != header { - // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) - } - - var getToken func(*http.Request) string - switch inl { - case header: - getToken = func(r *http.Request) string { return r.Header.Get(name) } - case query: - getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - token := getToken(r) - if token == "" { - return false, nil, nil - } - - ctx, p, err := authenticate(r.Context(), token) - *r = *r.WithContext(ctx) - return true, p, err - }) -} - -// ScopedAuthRequest contains both a http request and the required scopes for a particular operation -type ScopedAuthRequest struct { - Request *http.Request - RequiredScopes []string -} - -// BearerAuth for use with oauth2 flows -func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { - const prefix = "Bearer " - return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { - var token string - hdr := r.Request.Header.Get("Authorization") - if strings.HasPrefix(hdr, prefix) { - token = strings.TrimPrefix(hdr, prefix) - } - if token == "" { - qs := r.Request.URL.Query() - token = qs.Get("access_token") - } - //#nosec - ct, _, _ := runtime.ContentType(r.Request.Header) - if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") - } - - if token == "" { - return false, nil, nil - } - - rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) - *r.Request = *r.Request.WithContext(rctx) - p, err := authenticate(token, r.RequiredScopes) - return true, p, err - }) -} - -// BearerAuthCtx for use with oauth2 flows with support for context.Context. -func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { - const prefix = "Bearer " - return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { - var token string - hdr := r.Request.Header.Get("Authorization") - if strings.HasPrefix(hdr, prefix) { - token = strings.TrimPrefix(hdr, prefix) - } - if token == "" { - qs := r.Request.URL.Query() - token = qs.Get("access_token") - } - //#nosec - ct, _, _ := runtime.ContentType(r.Request.Header) - if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") - } - - if token == "" { - return false, nil, nil - } - - rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) - ctx, p, err := authenticate(rctx, token, r.RequiredScopes) - *r.Request = *r.Request.WithContext(ctx) - return true, p, err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go deleted file mode 100644 index 00c1a4d6..00000000 --- a/vendor/github.com/go-openapi/runtime/security/authorizer.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package security - -import ( - "net/http" - - "github.com/go-openapi/runtime" -) - -// Authorized provides a default implementation of the Authorizer interface where all -// requests are authorized (successful) -func Authorized() runtime.Authorizer { - return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil }) -} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go deleted file mode 100644 index 3b011a0b..00000000 --- a/vendor/github.com/go-openapi/runtime/statuses.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -// Statuses lists the most common HTTP status codes to default message -// taken from https://httpstatuses.com/ -var Statuses = map[int]string{ - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 103: "Checkpoint", - 122: "URI too long", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Request Processed", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request-URI Too Long", - 415: "Unsupported Media Type", - 416: "Request Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 444: "No Response", - 449: "Retry With", - 450: "Blocked by Windows Parental Controls", - 451: "Wrong Exchange Server", - 499: "Client Closed Request", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 509: "Bandwidth Limit Exceeded", - 510: "Not 
Extended", - 511: "Network Authentication Required", - 598: "Network read timeout error", - 599: "Network connect timeout error", -} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go deleted file mode 100644 index c7fd04c3..00000000 --- a/vendor/github.com/go-openapi/runtime/text.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -// TextConsumer creates a new text consumer -func TextConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("TextConsumer requires a reader") // early exit - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - // If the buffer is empty, no need to unmarshal it, which causes a panic. - if len(b) == 0 { - data = "" - return nil - } - - if tu, ok := data.(encoding.TextUnmarshaler); ok { - err := tu.UnmarshalText(b) - if err != nil { - return fmt.Errorf("text consumer: %v", err) - } - - return nil - } - - t := reflect.TypeOf(data) - if data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t.Elem().Kind() == reflect.String { - v.SetString(string(b)) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", - data, data, "can be resolved by supporting TextUnmarshaler interface") - }) -} - -// TextProducer creates a new text producer -func TextProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("TextProducer requires a writer") // early exit - } - - if data == nil { - return errors.New("no data given to produce text from") - } - - if tm, ok := data.(encoding.TextMarshaler); ok { - txt, err := tm.MarshalText() - if err != nil { - return fmt.Errorf("text producer: %v", err) - } - _, err = writer.Write(txt) - return err - } - - if str, ok := data.(error); ok { - _, err := writer.Write([]byte(str.Error())) - return err - } - - if str, ok := data.(fmt.Stringer); ok { - _, err := writer.Write([]byte(str.String())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - if v.Kind() != reflect.String { - return fmt.Errorf("%T is not a supported type by the TextProducer", data) - } - - _, err := writer.Write([]byte(v.String())) - return err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go deleted file mode 100644 index 11f5732a..00000000 --- a/vendor/github.com/go-openapi/runtime/values.go +++ /dev/null @@ -1,19 +0,0 @@ -package runtime - -// Values 
typically represent parameters on a http request. -type Values map[string][]string - -// GetOK returns the values collection for the given key. -// When the key is present in the map it will return true for hasKey. -// When the value is not empty it will return true for hasValue. -func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { - value, hasKey = v[key] - if !hasKey { - return - } - if len(value) == 0 { - return - } - hasValue = true - return -} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go deleted file mode 100644 index 821c7393..00000000 --- a/vendor/github.com/go-openapi/runtime/xml.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/xml" - "io" -) - -// XMLConsumer creates a new XML consumer -func XMLConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := xml.NewDecoder(reader) - return dec.Decode(data) - }) -} - -// XMLProducer creates a new XML producer -func XMLProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := xml.NewEncoder(writer) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/spec/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore deleted file mode 100644 index dd91ed6a..00000000 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml deleted file mode 100644 index 3e33f9f2..00000000 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ /dev/null @@ -1,23 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 2 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals diff --git a/vendor/github.com/go-openapi/spec/.travis.yml 
b/vendor/github.com/go-openapi/spec/.travis.yml deleted file mode 100644 index aa26d876..00000000 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. 
Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md deleted file mode 100644 index 6354742c..00000000 --- a/vendor/github.com/go-openapi/spec/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) - -The object model for OpenAPI specification documents. - -Currently supports Swagger 2.0. diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index c67e2d87..00000000 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-bindata. DO NOT EDIT. -// sources: -// schemas/jsonschema-draft-04.json (4.357kB) -// schemas/v2/schema.json (40.249kB) - -package spec - -import ( - "bytes" - "compress/gzip" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo - digest [sha256.Size]byte -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") - -func jsonschemaDraft04JSONBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04JSON, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04JSON() (*asset, error) { - bytes, err := jsonschemaDraft04JSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 
0xf1, 0x1b, 0x82, 0xe2}} - return a, nil -} - -var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b
\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf
4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\x
a0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") - -func 
v2SchemaJSONBytes() ([]byte, error) { - return bindataRead( - _v2SchemaJSON, - "v2/schema.json", - ) -} - -func v2SchemaJSON() (*asset, error) { - bytes, err := v2SchemaJSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// AssetString returns the asset contents as a string (instead of a []byte). -func AssetString(name string) (string, error) { - data, err := Asset(name) - return string(data), err -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// MustAssetString is like AssetString but panics when Asset would return an -// error. It simplifies safe initialization of global variables. -func MustAssetString(name string) string { - return string(MustAsset(name)) -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetDigest returns the digest of the file with the given name. It returns an -// error if the asset could not be found or the digest could not be loaded. -func AssetDigest(name string) ([sha256.Size]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) - } - return a.digest, nil - } - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) -} - -// Digests returns a map of all known files and their checksums. -func Digests() (map[string][sha256.Size]byte, error) { - mp := make(map[string][sha256.Size]byte, len(_bindata)) - for name := range _bindata { - a, err := _bindata[name]() - if err != nil { - return nil, err - } - mp[name] = a.digest - } - return mp, nil -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. 
-var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04JSON, - - "v2/schema.json": v2SchemaJSON, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"}, -// AssetDir("data/img") would return []string{"a.png", "b.png"}, -// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - canonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(canonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, - "v2": &bintree{nil, map[string]*bintree{ - "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory. -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) -} - -// RestoreAssets restores an asset under the given directory recursively. -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - canonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) -} diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go deleted file mode 100644 index 3fada0da..00000000 --- a/vendor/github.com/go-openapi/spec/cache.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import "sync" - -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.RWMutex - store map[string]interface{} -} - -// Get retrieves a cached URI -func (s *simpleCache) Get(uri string) (interface{}, bool) { - debugLog("getting %q from resolution cache", uri) - s.lock.RLock() - v, ok := s.store[uri] - debugLog("got %q from resolution cache: %t", uri, ok) - - s.lock.RUnlock() - return v, ok -} - -// Set caches a URI -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - -var resCache ResolutionCache - -func init() { - resCache = initResolutionCache() -} - -// initResolutionCache initializes the URI resolution cache -func initResolutionCache() ResolutionCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go deleted file mode 100644 index f285970a..00000000 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ContactInfo contact information for the exposed API. -// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go deleted file mode 100644 index 389c528f..00000000 --- a/vendor/github.com/go-openapi/spec/debug.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" - "log" - "os" - "path/filepath" - "runtime" -) - -var ( - // Debug is true when the SWAGGER_DEBUG env var is not empty. - // It enables a more verbose logging of this package. 
- Debug = os.Getenv("SWAGGER_DEBUG") != "" - // specLogger is a debug logger for this package - specLogger *log.Logger -) - -func init() { - debugOptions() -} - -func debugOptions() { - specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) -} - -func debugLog(msg string, args ...interface{}) { - // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() - if Debug { - _, file1, pos1, _ := runtime.Caller(1) - specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go deleted file mode 100644 index 1e7fc8c4..00000000 --- a/vendor/github.com/go-openapi/spec/expander.go +++ /dev/null @@ -1,650 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ExpandOptions provides options for spec expand -type ExpandOptions struct { - RelativeBase string - SkipSchemas bool - ContinueOnError bool - AbsoluteCircularRef bool -} - -// ResolveRefWithBase resolves a reference against a context root with preservation of base path -func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - specBasePath := "" - if opts != nil && opts.RelativeBase != "" { - specBasePath, _ = absPath(opts.RelativeBase) - } - - result := new(Schema) - if err := resolver.Resolve(ref, result, specBasePath); err != nil { - return nil, err - } - return result, nil -} - -// ResolveRef resolves a reference against a context root -// ref is guaranteed to be in root (no need to go to external files) -// ResolveRef is ONLY called from the code generation module -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - res, _, err := ref.GetPointer().Get(root) - if err != nil { - panic(err) - } - switch sch := res.(type) { - case Schema: - return &sch, nil - case *Schema: - return sch, nil - case map[string]interface{}: - b, _ := json.Marshal(sch) - newSch := new(Schema) - _ = json.Unmarshal(b, newSch) - return newSch, nil - default: - return nil, fmt.Errorf("unknown type for the resolved reference") - } -} - -// ResolveParameter resolves a parameter reference against a context root -func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { - return ResolveParameterWithBase(root, ref, nil) -} - -// ResolveParameterWithBase resolves a parameter reference against a context root and base path -func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - - result := new(Parameter) - if err := resolver.Resolve(&ref, result, ""); err != nil { - return nil, err - } - return result, nil -} - -// ResolveResponse resolves response a reference against a context root -func 
ResolveResponse(root interface{}, ref Ref) (*Response, error) { - return ResolveResponseWithBase(root, ref, nil) -} - -// ResolveResponseWithBase resolves response a reference against a context root and base path -func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - - result := new(Response) - if err := resolver.Resolve(&ref, result, ""); err != nil { - return nil, err - } - return result, nil -} - -// ResolveItems resolves parameter items reference against a context root and base path. -// -// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. -// Similarly, $ref are forbidden in response headers. -func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - basePath := "" - if opts.RelativeBase != "" { - basePath = opts.RelativeBase - } - result := new(Items) - if err := resolver.Resolve(&ref, result, basePath); err != nil { - return nil, err - } - return result, nil -} - -// ResolvePathItem resolves response a path item against a context root and base path -func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - basePath := "" - if opts.RelativeBase != "" { - basePath = opts.RelativeBase - } - result := new(PathItem) - if err := resolver.Resolve(&ref, result, basePath); err != nil { - return nil, err - } - return result, nil -} - -// ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger, options *ExpandOptions) error { - resolver, err := defaultSchemaLoader(spec, options, nil, nil) - // Just in case this ever returns an error. 
- if resolver.shouldStopOnError(err) { - return err - } - - // getting the base path of the spec to adjust all subsequent reference resolutions - specBasePath := "" - if options != nil && options.RelativeBase != "" { - specBasePath, _ = absPath(options.RelativeBase) - } - - if options == nil || !options.SkipSchemas { - for key, definition := range spec.Definitions { - var def *Schema - var err error - if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - if def != nil { - spec.Definitions[key] = *def - } - } - } - - for key := range spec.Parameters { - parameter := spec.Parameters[key] - if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Parameters[key] = parameter - } - - for key := range spec.Responses { - response := spec.Responses[key] - if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Responses[key] = response - } - - if spec.Paths != nil { - for key := range spec.Paths.Paths { - path := spec.Paths.Paths[key] - if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Paths.Paths[key] = path - } - } - - return nil -} - -// baseForRoot loads in the cache the root document and produces a fake "root" base path entry -// for further $ref resolution -func baseForRoot(root interface{}, cache ResolutionCache) string { - // cache the root document to resolve $ref's - const rootBase = "root" - if root != nil { - base, _ := absPath(rootBase) - normalizedBase := normalizeAbsPath(base) - debugLog("setting root doc in cache at: %s", normalizedBase) - if cache == nil { - cache = resCache - } - cache.Set(normalizedBase, root) - return rootBase - } - return "" -} - -// ExpandSchema expands the refs in the schema object with reference to the root object -// go-openapi/validate uses this function -// notice that it is impossible to reference a json schema in a different file other than root -func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - opts := &ExpandOptions{ - // when a root is specified, cache the root as an in-memory document for $ref retrieval - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - // when no base path is specified, remaining $ref (circular) are rendered with an absolute path - AbsoluteCircularRef: true, - } - return ExpandSchemaWithBasePath(schema, cache, opts) -} - -// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options -func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { - if schema == nil { - return nil - } - - var basePath string - if opts.RelativeBase != "" { - basePath, _ = absPath(opts.RelativeBase) - } - - resolver, err := defaultSchemaLoader(nil, opts, cache, nil) - if err != nil { - return err - } - - refs := []string{""} - var s *Schema - if s, err = expandSchema(*schema, refs, resolver, basePath); err != nil { - return err - } - *schema = *s - return nil -} - -func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Items != nil { - if target.Items.Schema != nil { - t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - *target.Items.Schema = *t 
- } - for i := range target.Items.Schemas { - t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - target.Items.Schemas[i] = *t - } - } - return &target, nil -} - -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Ref.String() == "" && target.Ref.IsRoot() { - // normalizing is important - newRef := normalizeFileRef(&target.Ref, basePath) - target.Ref = *newRef - return &target, nil - - } - - // change the base path of resolution when an ID is encountered - // otherwise the basePath should inherit the parent's - // important: ID can be relative path - if target.ID != "" { - debugLog("schema has ID: %s", target.ID) - // handling the case when id is a folder - // remember that basePath has to be a file - refPath := target.ID - if strings.HasSuffix(target.ID, "/") { - // path.Clean here would not work correctly if basepath is http - refPath = fmt.Sprintf("%s%s", refPath, "placeholder.json") - } - basePath = normalizePaths(refPath, basePath) - } - - var t *Schema - // if Ref is found, everything else doesn't matter - // Ref also changes the resolution scope of children expandSchema - if target.Ref.String() != "" { - // here the resolution scope is changed because a $ref was encountered - normalizedRef := normalizeFileRef(&target.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) { - // this means there is a cycle in the recursion tree: return the Ref - // - circular refs cannot be expanded. We leave them as ref. - // - denormalization means that a new local file ref is set relative to the original basePath - debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", - basePath, normalizedBasePath, normalizedRef.String()) - if !resolver.options.AbsoluteCircularRef { - target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath) - } else { - target.Ref = *normalizedRef - } - return &target, nil - } - - debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target) - if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) { - return nil, err - } - - if t != nil { - parentRefs = append(parentRefs, normalizedRef.String()) - var err error - transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref) - if transitiveResolver.shouldStopOnError(err) { - return nil, err - } - - basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) - - return expandSchema(*t, parentRefs, transitiveResolver, basePath) - } - } - - t, err := expandItems(target, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target = *t - } - - for i := range target.AllOf { - t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - target.AllOf[i] = *t - } - for i := range target.AnyOf { - t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - target.AnyOf[i] = *t - } - for i := range target.OneOf { - t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.OneOf[i] = *t - } - } - if target.Not != nil { - t, err := expandSchema(*target.Not, 
parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Not = *t - } - } - for k := range target.Properties { - t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.Properties[k] = *t - } - } - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalProperties.Schema = *t - } - } - for k := range target.PatternProperties { - t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.PatternProperties[k] = *t - } - } - for k := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Dependencies[k].Schema = *t - } - } - } - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalItems.Schema = *t - } - } - for k := range target.Definitions { - t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.Definitions[k] = *t - } - } - return &target, nil -} - -func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { - if pathItem == nil { - return nil - } - - parentRefs := []string{} - if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - if pathItem.Ref.String() != "" { - var err error - resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref) - if resolver.shouldStopOnError(err) { - return err - } - } - pathItem.Ref = Ref{} - - for idx := range pathItem.Parameters { - if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - ops := []*Operation{ - pathItem.Get, - pathItem.Head, - pathItem.Options, - pathItem.Put, - pathItem.Post, - pathItem.Patch, - pathItem.Delete, - } - for _, op := range ops { - if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - return nil -} - -func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { - if op == nil { - return nil - } - - for i := range op.Parameters { - param := op.Parameters[i] - if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - op.Parameters[i] = param - } - - if op.Responses != nil { - responses := op.Responses - if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - for code := range responses.StatusCodeResponses { - response := responses.StatusCodeResponses[code] - if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - responses.StatusCodeResponses[code] = response - } - 
} - return nil -} - -// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document -func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - // when no base path is specified, remaining $ref (circular) are rendered with an absolute path - AbsoluteCircularRef: true, - } - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandResponse expands a response based on a basepath -// This is the exported version of expandResponse -// all refs inside response will be resolved relative to basePath -func ExpandResponse(response *Response, basePath string) error { - var specBasePath string - if basePath != "" { - specBasePath, _ = absPath(basePath) - } - opts := &ExpandOptions{ - RelativeBase: specBasePath, - } - resolver, err := defaultSchemaLoader(nil, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document -func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - // when no base path is specified, remaining $ref (circular) are rendered with an absolute path - AbsoluteCircularRef: true, - } - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -// ExpandParameter expands a parameter based on a basepath. -// This is the exported version of expandParameter -// all refs inside parameter will be resolved relative to basePath -func ExpandParameter(parameter *Parameter, basePath string) error { - var specBasePath string - if basePath != "" { - specBasePath, _ = absPath(basePath) - } - opts := &ExpandOptions{ - RelativeBase: specBasePath, - } - resolver, err := defaultSchemaLoader(nil, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { - var ref *Ref - var sch *Schema - switch refable := input.(type) { - case *Parameter: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - case *Response: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - default: - return nil, nil, fmt.Errorf("expand: unsupported type %T. 
Input should be of type *Parameter or *Response", input) - } - return ref, sch, nil -} - -func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, _, err := getRefAndSchema(input) - if err != nil { - return err - } - if ref == nil { - return nil - } - parentRefs := []string{} - if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - ref, sch, _ := getRefAndSchema(input) - if ref.String() != "" { - transitiveResolver, err := resolver.transitiveResolver(basePath, *ref) - if transitiveResolver.shouldStopOnError(err) { - return err - } - basePath = resolver.updateBasePath(transitiveResolver, basePath) - resolver = transitiveResolver - } - - if sch != nil && sch.Ref.String() != "" { - // schema expanded to a $ref in another root - var ern error - sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI())) - if ern != nil { - return ern - } - } - if ref != nil { - *ref = Ref{} - } - - if !resolver.options.SkipSchemas && sch != nil { - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - *sch = *s - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go deleted file mode 100644 index 88add91b..00000000 --- a/vendor/github.com/go-openapi/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. 
-// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod deleted file mode 100644 index 02a142c0..00000000 --- a/vendor/github.com/go-openapi/spec/go.mod +++ /dev/null @@ -1,17 +0,0 @@ -module github.com/go-openapi/spec - -require ( - github.com/go-openapi/jsonpointer v0.19.3 - github.com/go-openapi/jsonreference v0.19.2 - github.com/go-openapi/swag v0.19.5 - github.com/kr/pty v1.1.5 // indirect - github.com/stretchr/objx v0.2.0 // indirect - github.com/stretchr/testify v1.3.0 - golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 // indirect - golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect - golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect - golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect - gopkg.in/yaml.v2 v2.2.2 -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum deleted file mode 100644 index 86db601c..00000000 --- a/vendor/github.com/go-openapi/spec/go.sum +++ /dev/null @@ -1,74 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.17.0 h1:nH6xp8XdXHx8dqveo0ZuJBluCO2qGrPbDNZ0dwoRHP0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= -github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= -github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi880= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag 
v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go deleted file mode 100644 index 39efe452..00000000 --- a/vendor/github.com/go-openapi/spec/header.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonArray = "array" -) - -// HeaderProps describes a response header -type HeaderProps struct { - Description string `json:"description,omitempty"` -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - CommonValidations - SimpleSchema - VendorExtensible - HeaderProps -} - -// ResponseHeader creates a new header instance for use in a response -func ResponseHeader() *Header { - return new(Header) -} - -// WithDescription sets the description on this response, allows for chaining -func (h *Header) WithDescription(description string) *Header { - h.Description = description - return h -} - -// Typed a fluent builder method for the type of parameter -func (h *Header) Typed(tpe, format string) *Header { - h.Type = tpe - h.Format = format - return h -} - -// CollectionOf a fluent builder method for an array item -func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = jsonArray - h.Items = items - h.CollectionFormat = format - return h -} - -// WithDefault sets the default value on this item -func (h *Header) WithDefault(defaultValue interface{}) *Header { - h.Default = defaultValue - return h -} - -// WithMaxLength sets a max length value -func (h *Header) WithMaxLength(max int64) *Header { - h.MaxLength = &max - return h -} - -// WithMinLength sets a min length value -func (h *Header) WithMinLength(min int64) *Header { - h.MinLength = &min - return h -} - -// WithPattern sets a pattern value -func (h *Header) WithPattern(pattern string) *Header { - h.Pattern = pattern - return h -} - -// WithMultipleOf sets a multiple of value -func (h *Header) WithMultipleOf(number float64) *Header { - h.MultipleOf = &number - return h -} - -// WithMaximum sets a maximum number value -func (h *Header) WithMaximum(max float64, exclusive bool) *Header { - h.Maximum = &max - h.ExclusiveMaximum = exclusive - return h -} - -// WithMinimum sets a minimum number value -func (h *Header) WithMinimum(min float64, exclusive bool) *Header { - h.Minimum = &min - h.ExclusiveMinimum = exclusive - return h -} - -// WithEnum sets a the enum values (replace) -func (h *Header) WithEnum(values ...interface{}) *Header { - h.Enum = append([]interface{}{}, values...) 
- return h -} - -// WithMaxItems sets the max items -func (h *Header) WithMaxItems(size int64) *Header { - h.MaxItems = &size - return h -} - -// WithMinItems sets the min items -func (h *Header) WithMinItems(size int64) *Header { - h.MinItems = &size - return h -} - -// UniqueValues dictates that this array can only have unique items -func (h *Header) UniqueValues() *Header { - h.UniqueItems = true - return h -} - -// AllowDuplicates this array can have duplicates -func (h *Header) AllowDuplicates() *Header { - h.UniqueItems = false - return h -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON unmarshals this header from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &h.HeaderProps) -} - -// JSONLookup look up a value by the json property name -func (h Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := h.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go deleted file mode 100644 index c458b49b..00000000 --- a/vendor/github.com/go-openapi/spec/info.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
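For reference, a minimal sketch of how the fluent Header builder from the deleted header.go could be used. It assumes the vendored go-openapi/spec package as shown above and simply prints the merged JSON that the custom MarshalJSON produces:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Build a response header with the fluent API; MarshalJSON concatenates
	// the embedded CommonValidations, SimpleSchema and HeaderProps blocks
	// into a single JSON object.
	h := spec.ResponseHeader().
		WithDescription("number of remaining requests").
		Typed("integer", "int32").
		WithMinimum(0, false)

	b, _ := json.Marshal(h)
	fmt.Println(string(b))
	// e.g. {"minimum":0,"type":"integer","format":"int32","description":"number of remaining requests"}
}
```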
- -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Extensions vendor specific extensions -type Extensions map[string]interface{} - -// Add adds a value to these extensions -func (e Extensions) Add(key string, value interface{}) { - realKey := strings.ToLower(key) - e[realKey] = value -} - -// GetString gets a string value from the extensions -func (e Extensions) GetString(key string) (string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(string) - return str, ok - } - return "", false -} - -// GetBool gets a string value from the extensions -func (e Extensions) GetBool(key string) (bool, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(bool) - return str, ok - } - return false, false -} - -// GetStringSlice gets a string value from the extensions -func (e Extensions) GetStringSlice(key string) ([]string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - arr, isSlice := v.([]interface{}) - if !isSlice { - return nil, false - } - var strs []string - for _, iface := range arr { - str, isString := iface.(string) - if !isString { - return nil, false - } - strs = append(strs, str) - } - return strs, ok - } - return nil, false -} - -// VendorExtensible composition block. -type VendorExtensible struct { - Extensions Extensions -} - -// AddExtension adds an extension to this extensible object -func (v *VendorExtensible) AddExtension(key string, value interface{}) { - if value == nil { - return - } - if v.Extensions == nil { - v.Extensions = make(map[string]interface{}) - } - v.Extensions.Add(key, value) -} - -// MarshalJSON marshals the extensions to json -func (v VendorExtensible) MarshalJSON() ([]byte, error) { - toser := make(map[string]interface{}) - for k, v := range v.Extensions { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - toser[k] = v - } - } - return json.Marshal(toser) -} - -// UnmarshalJSON for this extensible object -func (v *VendorExtensible) UnmarshalJSON(data []byte) error { - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if v.Extensions == nil { - v.Extensions = map[string]interface{}{} - } - v.Extensions[k] = vv - } - } - return nil -} - -// InfoProps the properties for an info definition -type InfoProps struct { - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *ContactInfo `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -// Info object provides metadata about the API. -// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. 
-// -// For more information: http://goo.gl/8us55a#infoObject -type Info struct { - VendorExtensible - InfoProps -} - -// JSONLookup look up a value by the json property name -func (i Info) JSONLookup(token string) (interface{}, error) { - if ex, ok := i.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(i.InfoProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (i Info) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.InfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (i *Info) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &i.InfoProps); err != nil { - return err - } - return json.Unmarshal(data, &i.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go deleted file mode 100644 index 365d1631..00000000 --- a/vendor/github.com/go-openapi/spec/items.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonRef = "$ref" -) - -// SimpleSchema describe swagger simple schemas for parameters and headers -type SimpleSchema struct { - Type string `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Items *Items `json:"items,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default interface{} `json:"default,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// TypeName return the type (or format) of a simple schema -func (s *SimpleSchema) TypeName() string { - if s.Format != "" { - return s.Format - } - return s.Type -} - -// ItemsTypeName yields the type of items in a simple schema array -func (s *SimpleSchema) ItemsTypeName() string { - if s.Items == nil { - return "" - } - return s.Items.TypeName() -} - -// CommonValidations describe common JSON-schema validations -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// Items a limited subset of JSON-Schema's items object. -// It is used by parameter definitions that are not located in "body". 
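As an illustration of the Extensions and VendorExtensible behaviour defined in the deleted info.go, here is a small sketch (assuming the vendored go-openapi/spec package): extension keys are stored lower-cased, looked up case-insensitively, and only "x-"-prefixed keys survive serialization.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	info := spec.Info{InfoProps: spec.InfoProps{Title: "Item API", Version: "1.0.0"}}

	// Extension keys are stored lower-cased; only "x-" prefixed keys are
	// emitted by VendorExtensible.MarshalJSON.
	info.AddExtension("x-audience", "internal")

	if v, ok := info.Extensions.GetString("X-Audience"); ok {
		fmt.Println(v) // internal (lookup is case-insensitive)
	}

	b, _ := json.Marshal(info)
	fmt.Println(string(b)) // {"title":"Item API","version":"1.0.0","x-audience":"internal"}
}
```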
-// -// For more information: http://goo.gl/8us55a#items-object -type Items struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible -} - -// NewItems creates a new instance of items -func NewItems() *Items { - return &Items{} -} - -// Typed a fluent builder method for the type of item -func (i *Items) Typed(tpe, format string) *Items { - i.Type = tpe - i.Format = format - return i -} - -// AsNullable flags this schema as nullable. -func (i *Items) AsNullable() *Items { - i.Nullable = true - return i -} - -// CollectionOf a fluent builder method for an array item -func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = jsonArray - i.Items = items - i.CollectionFormat = format - return i -} - -// WithDefault sets the default value on this item -func (i *Items) WithDefault(defaultValue interface{}) *Items { - i.Default = defaultValue - return i -} - -// WithMaxLength sets a max length value -func (i *Items) WithMaxLength(max int64) *Items { - i.MaxLength = &max - return i -} - -// WithMinLength sets a min length value -func (i *Items) WithMinLength(min int64) *Items { - i.MinLength = &min - return i -} - -// WithPattern sets a pattern value -func (i *Items) WithPattern(pattern string) *Items { - i.Pattern = pattern - return i -} - -// WithMultipleOf sets a multiple of value -func (i *Items) WithMultipleOf(number float64) *Items { - i.MultipleOf = &number - return i -} - -// WithMaximum sets a maximum number value -func (i *Items) WithMaximum(max float64, exclusive bool) *Items { - i.Maximum = &max - i.ExclusiveMaximum = exclusive - return i -} - -// WithMinimum sets a minimum number value -func (i *Items) WithMinimum(min float64, exclusive bool) *Items { - i.Minimum = &min - i.ExclusiveMinimum = exclusive - return i -} - -// WithEnum sets a the enum values (replace) -func (i *Items) WithEnum(values ...interface{}) *Items { - i.Enum = append([]interface{}{}, values...) 
- return i -} - -// WithMaxItems sets the max items -func (i *Items) WithMaxItems(size int64) *Items { - i.MaxItems = &size - return i -} - -// WithMinItems sets the min items -func (i *Items) WithMinItems(size int64) *Items { - i.MinItems = &size - return i -} - -// UniqueValues dictates that this array can only have unique items -func (i *Items) UniqueValues() *Items { - i.UniqueItems = true - return i -} - -// AllowDuplicates this array can have duplicates -func (i *Items) AllowDuplicates() *Items { - i.UniqueItems = false - return i -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (i *Items) UnmarshalJSON(data []byte) error { - var validations CommonValidations - if err := json.Unmarshal(data, &validations); err != nil { - return err - } - var ref Refable - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - var simpleSchema SimpleSchema - if err := json.Unmarshal(data, &simpleSchema); err != nil { - return err - } - var vendorExtensible VendorExtensible - if err := json.Unmarshal(data, &vendorExtensible); err != nil { - return err - } - i.Refable = ref - i.CommonValidations = validations - i.SimpleSchema = simpleSchema - i.VendorExtensible = vendorExtensible - return nil -} - -// MarshalJSON converts this items object to JSON -func (i Items) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(i.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b4, b3, b1, b2), nil -} - -// JSONLookup look up a value by the json property name -func (i Items) JSONLookup(token string) (interface{}, error) { - if token == jsonRef { - return &i.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go deleted file mode 100644 index f20961b4..00000000 --- a/vendor/github.com/go-openapi/spec/license.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// License information for the exposed API. 
-// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go deleted file mode 100644 index b8957e7c..00000000 --- a/vendor/github.com/go-openapi/spec/normalizer.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" - "net/url" - "os" - "path" - "path/filepath" - "strings" -) - -// normalize absolute path for cache. -// on Windows, drive letters should be converted to lower as scheme in net/url.URL -func normalizeAbsPath(path string) string { - u, err := url.Parse(path) - if err != nil { - debugLog("normalize absolute path failed: %s", err) - return path - } - return u.String() -} - -// base or refPath could be a file path or a URL -// given a base absolute path and a ref path, return the absolute path of refPath -// 1) if refPath is absolute, return it -// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if exists -// base could be a directory or a full file path -func normalizePaths(refPath, base string) string { - refURL, _ := url.Parse(refPath) - if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { - // refPath is actually absolute - if refURL.Host != "" { - return refPath - } - parts := strings.Split(refPath, "#") - result := filepath.FromSlash(parts[0]) - if len(parts) == 2 { - result += "#" + parts[1] - } - return result - } - - // relative refPath - baseURL, _ := url.Parse(base) - if !strings.HasPrefix(refPath, "#") { - // combining paths - if baseURL.Host != "" { - baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) - } else { // base is a file - newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) - return newBase - } - - } - // copying fragment from ref to base - baseURL.Fragment = refURL.Fragment - return baseURL.String() -} - -// denormalizePaths returns to simplest notation on file $ref, -// i.e. strips the absolute path and sets a path relative to the base path. -// -// This is currently used when we rewrite ref after a circular ref has been detected -func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { - debugLog("denormalizeFileRef for: %s", ref.String()) - - if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { - return ref - } - // strip relativeBase from URI - relativeBaseURL, _ := url.Parse(relativeBase) - relativeBaseURL.Fragment = "" - - if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { - // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix - r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) - return &r - } - - if relativeBaseURL.IsAbs() { - // other absolute URL get unchanged (i.e. 
with a non-empty scheme) - return ref - } - - // for relative file URIs: - originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) - originalRelativeBaseURL.Fragment = "" - if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { - // the resulting ref is in the expanded spec: return a local ref - r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) - return &r - } - - // check if we may set a relative path, considering the original base path for this spec. - // Example: - // spec is located at /mypath/spec.json - // my normalized ref points to: /mypath/item.json#/target - // expected result: item.json#/target - parts := strings.Split(ref.String(), "#") - relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) - if err != nil { - // there is no common ancestor (e.g. different drives on windows) - // leaves the ref unchanged - return ref - } - if len(parts) == 2 { - relativePath += "#" + parts[1] - } - r, _ := NewRef(relativePath) - return &r -} - -// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL -func normalizeFileRef(ref *Ref, relativeBase string) *Ref { - // This is important for when the reference is pointing to the root schema - if ref.String() == "" { - r, _ := NewRef(relativeBase) - return &r - } - - debugLog("normalizing %s against %s", ref.String(), relativeBase) - - s := normalizePaths(ref.String(), relativeBase) - r, _ := NewRef(s) - return &r -} - -// absPath returns the absolute path of a file -func absPath(fname string) (string, error) { - if strings.HasPrefix(fname, "http") { - return fname, nil - } - if filepath.IsAbs(fname) { - return fname, nil - } - wd, err := os.Getwd() - return filepath.Join(wd, fname), err -} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go deleted file mode 100644 index b1ebd599..00000000 --- a/vendor/github.com/go-openapi/spec/operation.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
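The ref-path normalization in the deleted normalizer.go is unexported, so it cannot be called from outside the package; the standalone sketch below only illustrates the core idea (absolute refs pass through unchanged, relative refs are joined against the base document's directory and keep their fragment). joinRef is a hypothetical helper written for this example, not the vendored implementation.

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

// joinRef resolves a relative $ref against the directory of the base
// document, keeping the ref's fragment; absolute refs are returned as-is.
func joinRef(refPath, base string) string {
	refURL, err := url.Parse(refPath)
	if err != nil || refURL.IsAbs() {
		return refPath // absolute (or unparseable) refs pass through unchanged
	}
	baseURL, err := url.Parse(base)
	if err != nil {
		return refPath
	}
	baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
	baseURL.Fragment = refURL.Fragment
	return baseURL.String()
}

func main() {
	fmt.Println(joinRef("item.json#/target", "https://example.com/specs/spec.json"))
	// https://example.com/specs/item.json#/target
}
```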
- -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "sort" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -func init() { - //gob.Register(map[string][]interface{}{}) - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) -} - -// OperationProps describes an operation -// -// NOTES: -// - schemes, when present must be from [http, https, ws, wss]: see validate -// - Security is handled as a special case: see MarshalJSON function -type OperationProps struct { - Description string `json:"description,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Tags []string `json:"tags,omitempty"` - Summary string `json:"summary,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - ID string `json:"operationId,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` - Responses *Responses `json:"responses,omitempty"` -} - -// MarshalJSON takes care of serializing operation properties to JSON -// -// We use a custom marhaller here to handle a special cases related to -// the Security field. We need to preserve zero length slice -// while omitting the field when the value is nil/unset. -func (op OperationProps) MarshalJSON() ([]byte, error) { - type Alias OperationProps - if op.Security == nil { - return json.Marshal(&struct { - Security []map[string][]string `json:"security,omitempty"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) - } - return json.Marshal(&struct { - Security []map[string][]string `json:"security"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) -} - -// Operation describes a single API operation on a path. -// -// For more information: http://goo.gl/8us55a#operationObject -type Operation struct { - VendorExtensible - OperationProps -} - -// SuccessResponse gets a success response model -func (o *Operation) SuccessResponse() (*Response, int, bool) { - if o.Responses == nil { - return nil, 0, false - } - - responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) - for k := range o.Responses.StatusCodeResponses { - if k >= 200 && k < 300 { - responseCodes = append(responseCodes, k) - } - } - if len(responseCodes) > 0 { - sort.Ints(responseCodes) - v := o.Responses.StatusCodeResponses[responseCodes[0]] - return &v, responseCodes[0], true - } - - return o.Responses.Default, 0, false -} - -// JSONLookup look up a value by the json property name -func (o Operation) JSONLookup(token string) (interface{}, error) { - if ex, ok := o.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(o.OperationProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *Operation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OperationProps); err != nil { - return err - } - return json.Unmarshal(data, &o.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (o Operation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OperationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// NewOperation creates a new operation instance. 
-// It expects an ID as parameter but not passing an ID is also valid. -func NewOperation(id string) *Operation { - op := new(Operation) - op.ID = id - return op -} - -// WithID sets the ID property on this operation, allows for chaining. -func (o *Operation) WithID(id string) *Operation { - o.ID = id - return o -} - -// WithDescription sets the description on this operation, allows for chaining -func (o *Operation) WithDescription(description string) *Operation { - o.Description = description - return o -} - -// WithSummary sets the summary on this operation, allows for chaining -func (o *Operation) WithSummary(summary string) *Operation { - o.Summary = summary - return o -} - -// WithExternalDocs sets/removes the external docs for/from this operation. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. -func (o *Operation) WithExternalDocs(description, url string) *Operation { - if description == "" && url == "" { - o.ExternalDocs = nil - return o - } - - if o.ExternalDocs == nil { - o.ExternalDocs = &ExternalDocumentation{} - } - o.ExternalDocs.Description = description - o.ExternalDocs.URL = url - return o -} - -// Deprecate marks the operation as deprecated -func (o *Operation) Deprecate() *Operation { - o.Deprecated = true - return o -} - -// Undeprecate marks the operation as not deprected -func (o *Operation) Undeprecate() *Operation { - o.Deprecated = false - return o -} - -// WithConsumes adds media types for incoming body values -func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { - o.Consumes = append(o.Consumes, mediaTypes...) - return o -} - -// WithProduces adds media types for outgoing body values -func (o *Operation) WithProduces(mediaTypes ...string) *Operation { - o.Produces = append(o.Produces, mediaTypes...) - return o -} - -// WithTags adds tags for this operation -func (o *Operation) WithTags(tags ...string) *Operation { - o.Tags = append(o.Tags, tags...) - return o -} - -// AddParam adds a parameter to this operation, when a parameter for that location -// and with that name already exists it will be replaced -func (o *Operation) AddParam(param *Parameter) *Operation { - if param == nil { - return o - } - - for i, p := range o.Parameters { - if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) - params = append(params, o.Parameters[i+1:]...) - o.Parameters = params - return o - } - } - - o.Parameters = append(o.Parameters, *param) - return o -} - -// RemoveParam removes a parameter from the operation -func (o *Operation) RemoveParam(name, in string) *Operation { - for i, p := range o.Parameters { - if p.Name == name && p.In == in { - o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) - return o - } - } - return o -} - -// SecuredWith adds a security scope to this operation. -func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { - o.Security = append(o.Security, map[string][]string{name: scopes}) - return o -} - -// WithDefaultResponse adds a default response to the operation. -// Passing a nil value will remove the response -func (o *Operation) WithDefaultResponse(response *Response) *Operation { - return o.RespondsWith(0, response) -} - -// RespondsWith adds a status code response to the operation. 
-// When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation -func (o *Operation) RespondsWith(code int, response *Response) *Operation { - if o.Responses == nil { - o.Responses = new(Responses) - } - if code == 0 { - o.Responses.Default = response - return o - } - if response == nil { - delete(o.Responses.StatusCodeResponses, code) - return o - } - if o.Responses.StatusCodeResponses == nil { - o.Responses.StatusCodeResponses = make(map[int]Response) - } - o.Responses.StatusCodeResponses[code] = *response - return o -} - -type opsAlias OperationProps - -type gobAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *opsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (o Operation) GobEncode() ([]byte, error) { - raw := struct { - Ext VendorExtensible - Props OperationProps - }{ - Ext: o.VendorExtensible, - Props: o.OperationProps, - } - var b bytes.Buffer - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (o *Operation) GobDecode(b []byte) error { - var raw struct { - Ext VendorExtensible - Props OperationProps - } - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - o.VendorExtensible = raw.Ext - o.OperationProps = raw.Props - return nil -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (op OperationProps) GobEncode() ([]byte, error) { - raw := gobAlias{ - Alias: (*opsAlias)(&op), - } - - var b bytes.Buffer - if op.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(op.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(op.Security)) - for _, req := range op.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (op *OperationProps) GobDecode(b []byte) error { - var raw gobAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) 
- } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *op = *(*OperationProps)(raw.Alias) - return nil -} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go deleted file mode 100644 index cecdff54..00000000 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// QueryParam creates a query parameter -func QueryParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} -} - -// HeaderParam creates a header parameter, this is always required by default -func HeaderParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} -} - -// PathParam creates a path parameter, this is always required -func PathParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} -} - -// BodyParam creates a body parameter -func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, - SimpleSchema: SimpleSchema{Type: "object"}} -} - -// FormDataParam creates a body parameter -func FormDataParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} -} - -// FileParam creates a body parameter -func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, - SimpleSchema: SimpleSchema{Type: "file"}} -} - -// SimpleArrayParam creates a param for a simple array (string, int, date etc) -func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, - SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", - Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} -} - -// ParamRef creates a parameter that's a json reference -func ParamRef(uri string) *Parameter { - p := new(Parameter) - p.Ref = MustCreateRef(uri) - return p -} - -// ParamProps describes the specific attributes of an operation parameter -// -// NOTE: -// - Schema is defined when "in" == "body": see validate -// - AllowEmptyValue is allowed where "in" == "query" || "formData" -type ParamProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` -} - -// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). -// -// There are five possible parameter types. 
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. 
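The parameter kinds described above map onto the package's fluent constructors (QueryParam, PathParam, HeaderParam, BodyParam, FormDataParam, FileParam). A minimal sketch, assuming the vendored go-openapi/spec API as shown in the deleted parameter.go and items.go:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Query parameter with a type, format and default value
	// (WithDefault also flags the parameter as optional).
	limit := spec.QueryParam("limit").Typed("integer", "int32").WithDefault(20)

	// Path parameters are always required.
	id := spec.PathParam("itemId").Typed("string", "")

	// A CSV-style array parameter built with the Items fluent builder.
	tags := spec.QueryParam("tags").CollectionOf(spec.NewItems().Typed("string", ""), "csv")

	for _, p := range []*spec.Parameter{limit, id, tags} {
		b, _ := json.Marshal(p)
		fmt.Println(string(b))
	}
}
```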
-// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// JSONLookup look up a value by the json property name -func (p Parameter) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.ParamProps, token) - return r, err -} - -// WithDescription a fluent builder method for the description of the parameter -func (p *Parameter) WithDescription(description string) *Parameter { - p.Description = description - return p -} - -// Named a fluent builder method to override the name of the parameter -func (p *Parameter) Named(name string) *Parameter { - p.Name = name - return p -} - -// WithLocation a fluent builder method to override the location of the parameter -func (p *Parameter) WithLocation(in string) *Parameter { - p.In = in - return p -} - -// Typed a fluent builder method for the type of the parameter value -func (p *Parameter) Typed(tpe, format string) *Parameter { - p.Type = tpe - p.Format = format - return p -} - -// CollectionOf a fluent builder method for an array parameter -func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = jsonArray - p.Items = items - p.CollectionFormat = format - return p -} - -// WithDefault sets the default value on this parameter -func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { - p.AsOptional() // with default implies optional - p.Default = defaultValue - return p -} - -// AllowsEmptyValues flags this parameter as being ok with empty values -func (p *Parameter) AllowsEmptyValues() *Parameter { - p.AllowEmptyValue = true - return p -} - -// NoEmptyValues flags this parameter as not liking empty values -func (p *Parameter) NoEmptyValues() *Parameter { - p.AllowEmptyValue = false - return p -} - -// AsOptional flags this parameter as optional -func (p *Parameter) AsOptional() *Parameter { - p.Required = false - return p -} - -// AsRequired flags this parameter as required -func (p *Parameter) AsRequired() *Parameter { - if p.Default != nil { // with a default required makes no sense - return p - } - p.Required = true - return p -} - -// WithMaxLength sets a max length value -func (p *Parameter) WithMaxLength(max int64) *Parameter { - p.MaxLength = &max - return p -} - -// WithMinLength sets a min length value -func (p *Parameter) WithMinLength(min int64) *Parameter { - p.MinLength = &min - return p -} - -// WithPattern sets a pattern value -func (p *Parameter) WithPattern(pattern string) *Parameter { - p.Pattern = pattern - return p -} - -// WithMultipleOf sets a multiple of value -func (p *Parameter) WithMultipleOf(number float64) *Parameter { - p.MultipleOf = &number - return p -} - -// WithMaximum sets a maximum number value -func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { - p.Maximum = &max - p.ExclusiveMaximum = exclusive - return p -} - -// WithMinimum sets a minimum number value -func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { - p.Minimum = &min - p.ExclusiveMinimum = exclusive - return p -} - -// WithEnum sets a the enum values (replace) -func (p *Parameter) WithEnum(values ...interface{}) *Parameter { - p.Enum = append([]interface{}{}, values...) - return p -} - -// WithMaxItems sets the max items -func (p *Parameter) WithMaxItems(size int64) *Parameter { - p.MaxItems = &size - return p -} - -// WithMinItems sets the min items -func (p *Parameter) WithMinItems(size int64) *Parameter { - p.MinItems = &size - return p -} - -// UniqueValues dictates that this array can only have unique items -func (p *Parameter) UniqueValues() *Parameter { - p.UniqueItems = true - return p -} - -// AllowDuplicates this array can have duplicates -func (p *Parameter) AllowDuplicates() *Parameter { - p.UniqueItems = false - return p -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.ParamProps) -} - -// MarshalJSON converts this items object to JSON -func (p Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.ParamProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2, b4, b5), nil -} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go deleted file mode 100644 index 68fc8e90..00000000 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// PathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. -// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// JSONLookup look up a value by the json property name -func (p PathItem) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.PathItemProps) -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go deleted file mode 100644 index 9dc82a29..00000000 --- a/vendor/github.com/go-openapi/spec/paths.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// JSONLookup look up a value by the json property name -func (p Paths) JSONLookup(token string) (interface{}, error) { - if pi, ok := p.Paths[token]; ok { - return &pi, nil - } - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go deleted file mode 100644 index 08ff869b..00000000 --- a/vendor/github.com/go-openapi/spec/ref.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
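Since Paths uses a custom (un)marshaller to flatten path entries alongside vendor extensions, a short sketch of the round-trip behaviour may help: keys starting with "/" become PathItem entries, while "x-" keys are collected as extensions. It assumes the vendored go-openapi/spec package.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{
		"/items": {"get": {"summary": "list items"}},
		"x-internal": true
	}`)

	var paths spec.Paths
	if err := json.Unmarshal(raw, &paths); err != nil {
		panic(err)
	}

	// "/"-prefixed keys land in Paths, "x-" keys land in the extensions.
	fmt.Println(paths.Paths["/items"].Get.Summary) // list items
	fmt.Println(paths.Extensions["x-internal"])    // true
}
```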
- -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "net/http" - "os" - "path/filepath" - - "github.com/go-openapi/jsonreference" -) - -// Refable is a struct for things that accept a $ref property -type Refable struct { - Ref Ref -} - -// MarshalJSON marshals the ref to json -func (r Refable) MarshalJSON() ([]byte, error) { - return r.Ref.MarshalJSON() -} - -// UnmarshalJSON unmarshalss the ref from json -func (r *Refable) UnmarshalJSON(d []byte) error { - return json.Unmarshal(d, &r.Ref) -} - -// Ref represents a json reference that is potentially resolved -type Ref struct { - jsonreference.Ref -} - -// RemoteURI gets the remote uri part of the ref -func (r *Ref) RemoteURI() string { - if r.String() == "" { - return r.String() - } - - u := *r.GetURL() - u.Fragment = "" - return u.String() -} - -// IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI(basepaths ...string) bool { - if r.String() == "" { - return true - } - - v := r.RemoteURI() - if v == "" { - return true - } - - if r.HasFullURL { - rr, err := http.Get(v) - if err != nil { - return false - } - - return rr.StatusCode/100 == 2 - } - - if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { - return false - } - - // check for local file - pth := v - if r.HasURLPathOnly { - base := "." - if len(basepaths) > 0 { - base = filepath.Dir(filepath.Join(basepaths...)) - } - p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) - if e != nil { - return false - } - pth = p - } - - fi, err := os.Stat(filepath.ToSlash(pth)) - if err != nil { - return false - } - - return !fi.IsDir() -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - ref, err := r.Ref.Inherits(child.Ref) - if err != nil { - return nil, err - } - return &Ref{Ref: *ref}, nil -} - -// NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri -func NewRef(refURI string) (Ref, error) { - ref, err := jsonreference.New(refURI) - if err != nil { - return Ref{}, err - } - return Ref{Ref: ref}, nil -} - -// MustCreateRef creates a ref object but panics when refURI is invalid. -// Use the NewRef method for a version that returns an error. 
-func MustCreateRef(refURI string) Ref { - return Ref{Ref: jsonreference.MustCreateRef(refURI)} -} - -// MarshalJSON marshals this ref into a JSON object -func (r Ref) MarshalJSON() ([]byte, error) { - str := r.String() - if str == "" { - if r.IsRoot() { - return []byte(`{"$ref":""}`), nil - } - return []byte("{}"), nil - } - v := map[string]interface{}{"$ref": str} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshals this ref from a JSON object -func (r *Ref) UnmarshalJSON(d []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(d, &v); err != nil { - return err - } - return r.fromMap(v) -} - -// GobEncode provides a safe gob encoder for Ref -func (r Ref) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw, err := r.MarshalJSON() - if err != nil { - return nil, err - } - err = gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Ref -func (r *Ref) GobDecode(b []byte) error { - var raw []byte - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - return json.Unmarshal(raw, r) -} - -func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go deleted file mode 100644 index 27729c1d..00000000 --- a/vendor/github.com/go-openapi/spec/response.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// ResponseProps properties specific to a response -type ResponseProps struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` -} - -// Response describes a single response from an API Operation. 
-// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps - VendorExtensible -} - -// JSONLookup look up a value by the json property name -func (r Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &r.Ref, nil - } - ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) - return ptr, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return json.Unmarshal(data, &r.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} - -// WithDescription sets the description on this response, allows for chaining -func (r *Response) WithDescription(description string) *Response { - r.Description = description - return r -} - -// WithSchema sets the schema on this response, allows for chaining. -// Passing a nil argument removes the schema from this response -func (r *Response) WithSchema(schema *Schema) *Response { - r.Schema = schema - return r -} - -// AddHeader adds a header to this response -func (r *Response) AddHeader(name string, header *Header) *Response { - if header == nil { - return r.RemoveHeader(name) - } - if r.Headers == nil { - r.Headers = make(map[string]Header) - } - r.Headers[name] = *header - return r -} - -// RemoveHeader removes a header from this response -func (r *Response) RemoveHeader(name string) *Response { - delete(r.Headers, name) - return r -} - -// AddExample adds an example to this response -func (r *Response) AddExample(mediaType string, example interface{}) *Response { - if r.Examples == nil { - r.Examples = make(map[string]interface{}) - } - r.Examples[mediaType] = example - return r -} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go deleted file mode 100644 index 4efb6f86..00000000 --- a/vendor/github.com/go-openapi/spec/responses.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
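Taken together, the response builders in the file removed above compose fluently. A minimal sketch, assuming the package is imported from github.com/go-openapi/spec under the name spec; the example values are placeholders:

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// build a concrete response with the fluent helpers
	notFound := spec.NewResponse().WithDescription("the requested resource was not found")
	notFound.AddExample("application/json", map[string]interface{}{"message": "not found"})

	// or declare a response purely as a $ref to a shared definition
	shared := spec.ResponseRef("#/responses/NotFound")

	b, _ := notFound.MarshalJSON()
	fmt.Println(string(b), shared.Ref.String())
}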
- -package spec - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "github.com/go-openapi/swag" -) - -// Responses is a container for the expected responses of an operation. -// The container maps a HTTP response code to the expected response. -// It is not expected from the documentation to necessarily cover all possible HTTP response codes, -// since they may not be known in advance. However, it is expected from the documentation to cover -// a successful operation response and any known errors. -// -// The `default` can be used a default response object for all HTTP codes that are not covered -// individually by the specification. -// -// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response -// for a successful operation call. -// -// For more information: http://goo.gl/8us55a#responsesObject -type Responses struct { - VendorExtensible - ResponsesProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (r Responses) JSONLookup(token string) (interface{}, error) { - if token == "default" { - return r.Default, nil - } - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if i, err := strconv.Atoi(token); err == nil { - if scr, ok := r.StatusCodeResponses[i]; ok { - return scr, nil - } - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { - r.ResponsesProps = ResponsesProps{} - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// ResponsesProps describes all responses for an operation. -// It tells what is the default response and maps all responses with a -// HTTP status code. 
-type ResponsesProps struct { - Default *Response - StatusCodeResponses map[int]Response -} - -// MarshalJSON marshals responses as JSON -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]Response{} - if r.Default != nil { - toser["default"] = *r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -// UnmarshalJSON unmarshals responses from JSON -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response - if err := json.Unmarshal(data, &res); err != nil { - return nil - } - if v, ok := res["default"]; ok { - r.Default = &v - delete(res, "default") - } - for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} - } - r.StatusCodeResponses[nk] = v - } - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go deleted file mode 100644 index 37858ece..00000000 --- a/vendor/github.com/go-openapi/spec/schema.go +++ /dev/null @@ -1,596 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format 
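The Responses container described above pairs a default response with per-status-code entries. A small sketch under the same import assumption; the descriptions are placeholders:

package main

import "github.com/go-openapi/spec"

func main() {
	// "default" covers any status code that is not listed explicitly
	ops := spec.Responses{
		ResponsesProps: spec.ResponsesProps{
			Default: spec.NewResponse().WithDescription("unexpected error"),
			StatusCodeResponses: map[int]spec.Response{
				200: *spec.NewResponse().WithDescription("ok"),
			},
		},
	}
	_, _ = ops.MarshalJSON()
}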
-func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshal this from JSON -func (r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - return r.fromMap(v) -} - -func (r *SchemaURL) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := url.Parse(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// SchemaProps describes a JSON schema (draft 4) -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-"` - Schema SchemaURL `json:"-"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema 
`json:"oneOf,omitempty"` - AnyOf []Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties map[string]Schema `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties map[string]Schema `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - XML *XMLObject `json:"xml,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. -// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s Schema) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - if ex, ok := s.ExtraProps[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { - return r, err - } - r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) - return r, err -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return 
s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = format - } - return s -} - -// AsNullable flags this schema as nullable. -func (s *Schema) AsNullable() *Schema { - s.Nullable = true - return s -} - -// CollectionOf a fluent builder method for an array parameter -func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{jsonArray} - s.Items = &SchemaOrArray{Schema: &items} - return s -} - -// WithDefault sets the default value on this parameter -func (s *Schema) WithDefault(defaultValue interface{}) *Schema { - s.Default = defaultValue - return s -} - -// WithRequired flags this parameter as required -func (s *Schema) WithRequired(items ...string) *Schema { - s.Required = items - return s -} - -// AddRequired adds field names to the required properties array -func (s *Schema) AddRequired(items ...string) *Schema { - s.Required = append(s.Required, items...) - return s -} - -// WithMaxLength sets a max length value -func (s *Schema) WithMaxLength(max int64) *Schema { - s.MaxLength = &max - return s -} - -// WithMinLength sets a min length value -func (s *Schema) WithMinLength(min int64) *Schema { - s.MinLength = &min - return s -} - -// WithPattern sets a pattern value -func (s *Schema) WithPattern(pattern string) *Schema { - s.Pattern = pattern - return s -} - -// WithMultipleOf sets a multiple of value -func (s *Schema) WithMultipleOf(number float64) *Schema { - s.MultipleOf = &number - return s -} - -// WithMaximum sets a maximum number value -func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { - s.Maximum = &max - s.ExclusiveMaximum = exclusive - return s -} - -// WithMinimum sets a minimum number value -func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { - s.Minimum = &min - s.ExclusiveMinimum = exclusive - return s -} - -// WithEnum sets a the enum values (replace) -func (s *Schema) WithEnum(values ...interface{}) *Schema { - s.Enum = append([]interface{}{}, values...) - return s -} - -// WithMaxItems sets the max items -func (s *Schema) WithMaxItems(size int64) *Schema { - s.MaxItems = &size - return s -} - -// WithMinItems sets the min items -func (s *Schema) WithMinItems(size int64) *Schema { - s.MinItems = &size - return s -} - -// UniqueValues dictates that this array can only have unique items -func (s *Schema) UniqueValues() *Schema { - s.UniqueItems = true - return s -} - -// AllowDuplicates this array can have duplicates -func (s *Schema) AllowDuplicates() *Schema { - s.UniqueItems = false - return s -} - -// AddToAllOf adds a schema to the allOf property -func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { - s.AllOf = append(s.AllOf, schemas...) - return s -} - -// WithDiscriminator sets the name of the discriminator field -func (s *Schema) WithDiscriminator(discriminator string) *Schema { - s.Discriminator = discriminator - return s -} - -// AsReadOnly flags this schema as readonly -func (s *Schema) AsReadOnly() *Schema { - s.ReadOnly = true - return s -} - -// AsWritable flags this schema as writeable (not read-only) -func (s *Schema) AsWritable() *Schema { - s.ReadOnly = false - return s -} - -// WithExample sets the example for this schema -func (s *Schema) WithExample(example interface{}) *Schema { - s.Example = example - return s -} - -// WithExternalDocs sets/removes the external docs for/from this schema. 
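Chained together, the property constructors and the With* builders above describe an object schema in a few lines. A sketch with the same assumed import; the field names are illustrative only:

package main

import "github.com/go-openapi/spec"

func main() {
	// an object with a required, non-empty "name" and an optional string array "tags"
	user := (&spec.Schema{}).Typed("object", "").
		WithTitle("User").
		SetProperty("name", *spec.StringProperty().WithMinLength(1)).
		SetProperty("tags", *spec.ArrayProperty(spec.StringProperty())).
		WithRequired("name")
	_ = user
}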
-// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. -func (s *Schema) WithExternalDocs(description, url string) *Schema { - if description == "" && url == "" { - s.ExternalDocs = nil - return s - } - - if s.ExternalDocs == nil { - s.ExternalDocs = &ExternalDocumentation{} - } - s.ExternalDocs.Description = description - s.ExternalDocs.URL = url - return s -} - -// WithXMLName sets the xml name for the object -func (s *Schema) WithXMLName(name string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Name = name - return s -} - -// WithXMLNamespace sets the xml namespace for the object -func (s *Schema) WithXMLNamespace(namespace string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Namespace = namespace - return s -} - -// WithXMLPrefix sets the xml prefix for the object -func (s *Schema) WithXMLPrefix(prefix string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Prefix = prefix - return s -} - -// AsXMLAttribute flags this object as xml attribute -func (s *Schema) AsXMLAttribute() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = true - return s -} - -// AsXMLElement flags this object as an xml node -func (s *Schema) AsXMLElement() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = false - return s -} - -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types -func (s *Schema) AsWrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = true - return s -} - -// AsUnwrappedXML flags this object as an xml node -func (s *Schema) AsUnwrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = false - return s -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - props := struct { - SchemaProps - SwaggerSchemaProps - }{} - if err := json.Unmarshal(data, &props); err != nil { - return err - } - - sch := Schema{ - SchemaProps: props.SchemaProps, - SwaggerSchemaProps: props.SwaggerSchemaProps, - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - _ = sch.Ref.fromMap(d) - _ = sch.Schema.fromMap(d) - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - 
if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go deleted file mode 100644 index 9e20e96c..00000000 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strings" - - "github.com/go-openapi/swag" -) - -// PathLoader function to use when loading remote refs -var PathLoader func(string) (json.RawMessage, error) - -func init() { - PathLoader = func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil - } -} - -// resolverContext allows to share a context during spec processing. -// At the moment, it just holds the index of circular references found. -type resolverContext struct { - // circulars holds all visited circular references, which allows shortcuts. - // NOTE: this is not just a performance improvement: it is required to figure out - // circular references which participate several cycles. - // This structure is privately instantiated and needs not be locked against - // concurrent access, unless we chose to implement a parallel spec walking. 
- circulars map[string]bool - basePath string -} - -func newResolverContext(originalBasePath string) *resolverContext { - return &resolverContext{ - circulars: make(map[string]bool), - basePath: originalBasePath, // keep the root base path in context - } -} - -type schemaLoader struct { - root interface{} - options *ExpandOptions - cache ResolutionCache - context *resolverContext - loadDoc func(string) (json.RawMessage, error) -} - -func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) { - if ref.IsRoot() || ref.HasFragmentOnly { - return r, nil - } - - baseRef, _ := NewRef(basePath) - currentRef := normalizeFileRef(&ref, basePath) - if strings.HasPrefix(currentRef.String(), baseRef.String()) { - return r, nil - } - - // Set a new root to resolve against - rootURL := currentRef.GetURL() - rootURL.Fragment = "" - root, _ := r.cache.Get(rootURL.String()) - - // shallow copy of resolver options to set a new RelativeBase when - // traversing multiple documents - newOptions := r.options - newOptions.RelativeBase = rootURL.String() - debugLog("setting new root: %s", newOptions.RelativeBase) - resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context) - if err != nil { - return nil, err - } - - return resolver, nil -} - -func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { - if transitive != r { - debugLog("got a new resolver") - if transitive.options != nil && transitive.options.RelativeBase != "" { - basePath, _ = absPath(transitive.options.RelativeBase) - debugLog("new basePath = %s", basePath) - } - } - return basePath -} - -func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return fmt.Errorf("resolve ref: target needs to be a pointer") - } - - refURL := ref.GetURL() - if refURL == nil { - return nil - } - - var res interface{} - var data interface{} - var err error - // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means - // it is pointing somewhere in the root. - root := r.root - if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) - } - } - if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { - data = root - } else { - baseRef := normalizeFileRef(ref, basePath) - debugLog("current ref is: %s", ref.String()) - debugLog("current ref normalized file: %s", baseRef.String()) - data, _, _, err = r.load(baseRef.GetURL()) - if err != nil { - return err - } - } - - res = data - if ref.String() != "" { - res, _, err = ref.GetPointer().Get(data) - if err != nil { - return err - } - } - return swag.DynamicJSONToStruct(res, target) -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) - toFetch := *refURL - toFetch.Fragment = "" - - normalized := normalizeAbsPath(toFetch.String()) - - data, fromCache := r.cache.Get(normalized) - if !fromCache { - b, err := r.loadDoc(normalized) - if err != nil { - debugLog("unable to load the document: %v", err) - return nil, url.URL{}, false, err - } - - if err := json.Unmarshal(b, &data); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(normalized, data) - } - - return data, toFetch, fromCache, nil -} - -// isCircular detects cycles in sequences of $ref. 
-// It relies on a private context (which needs not be locked). -func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { - normalizedRef := normalizePaths(ref.String(), basePath) - if _, ok := r.context.circulars[normalizedRef]; ok { - // circular $ref has been already detected in another explored cycle - foundCycle = true - return - } - foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) - if foundCycle { - r.context.circulars[normalizedRef] = true - } - return -} - -// Resolve resolves a reference against basePath and stores the result in target -// Resolve is not in charge of following references, it only resolves ref by following its URL -// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them -// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct -func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { - return r.resolveRef(ref, target, basePath) -} - -func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { - var ref *Ref - switch refable := input.(type) { - case *Schema: - ref = &refable.Ref - case *Parameter: - ref = &refable.Ref - case *Response: - ref = &refable.Ref - case *PathItem: - ref = &refable.Ref - default: - return fmt.Errorf("deref: unsupported type %T", input) - } - - curRef := ref.String() - if curRef != "" { - normalizedRef := normalizeFileRef(ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if r.isCircular(normalizedRef, basePath, parentRefs...) { - return nil - } - - if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { - return err - } - - // NOTE(fredbi): removed basePath check => needs more testing - if ref.String() != "" && ref.String() != curRef { - parentRefs = append(parentRefs, normalizedRef.String()) - return r.deref(input, parentRefs, normalizedBasePath) - } - } - - return nil -} - -func (r *schemaLoader) shouldStopOnError(err error) bool { - if err != nil && !r.options.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - -func defaultSchemaLoader( - root interface{}, - expandOptions *ExpandOptions, - cache ResolutionCache, - context *resolverContext) (*schemaLoader, error) { - - if cache == nil { - cache = resCache - } - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } - absBase, _ := absPath(expandOptions.RelativeBase) - if context == nil { - context = newResolverContext(absBase) - } - return &schemaLoader{ - root: root, - options: expandOptions, - cache: cache, - context: context, - loadDoc: func(path string) (json.RawMessage, error) { - debugLog("fetching document at %q", path) - return PathLoader(path) - }, - }, nil -} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go deleted file mode 100644 index fe353842..00000000 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - basic = "basic" - apiKey = "apiKey" - oauth2 = "oauth2" - implicit = "implicit" - password = "password" - application = "application" - accessCode = "accessCode" -) - -// BasicAuth creates a basic auth security scheme -func BasicAuth() *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} -} - -// APIKeyAuth creates an api key auth security scheme -func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} -} - -// OAuth2Implicit creates an implicit flow oauth2 security scheme -func OAuth2Implicit(authorizationURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: implicit, - AuthorizationURL: authorizationURL, - }} -} - -// OAuth2Password creates a password flow oauth2 security scheme -func OAuth2Password(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: password, - TokenURL: tokenURL, - }} -} - -// OAuth2Application creates an application flow oauth2 security scheme -func OAuth2Application(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: application, - TokenURL: tokenURL, - }} -} - -// OAuth2AccessToken creates an access token flow oauth2 security scheme -func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: accessCode, - AuthorizationURL: authorizationURL, - TokenURL: tokenURL, - }} -} - -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// AddScope adds a scope to this security scheme -func (s *SecuritySchemeProps) AddScope(scope, description string) { - if s.Scopes == nil { - s.Scopes = make(map[string]string) - } - s.Scopes[scope] = description -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. -// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). 
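The constructors above cover basic auth, API keys and the oauth2 flows. A brief sketch of how such schemes are typically collected; the header name and URLs are placeholders:

package main

import "github.com/go-openapi/spec"

func main() {
	schemes := map[string]*spec.SecurityScheme{
		"basicAuth": spec.BasicAuth(),
		"apiKey":    spec.APIKeyAuth("X-API-Key", "header"),
		"oauth":     spec.OAuth2AccessToken("https://example.org/authorize", "https://example.org/token"),
	}
	// scopes only apply to the oauth2 flows
	schemes["oauth"].AddScope("read", "grants read access")
	_ = schemes
}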
-// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecuritySchemeProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go deleted file mode 100644 index 0bb045bc..00000000 --- a/vendor/github.com/go-openapi/spec/spec.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import "encoding/json" - -//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json -//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema -//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
-//go:generate perl -pi -e s,Json,JSON,g bindata.go - -const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs - SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema - JSONSchemaURL = "http://json-schema.org/draft-04/schema#" -) - -var ( - jsonSchema *Schema - swaggerSchema *Schema -) - -func init() { - jsonSchema = MustLoadJSONSchemaDraft04() - swaggerSchema = MustLoadSwagger20Schema() -} - -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error -func MustLoadJSONSchemaDraft04() *Schema { - d, e := JSONSchemaDraft04() - if e != nil { - panic(e) - } - return d -} - -// JSONSchemaDraft04 loads the json schema document for json shema draft04 -func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} - -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error -func MustLoadSwagger20Schema() *Schema { - d, e := Swagger20Schema() - if e != nil { - panic(e) - } - return d -} - -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets -func Swagger20Schema() (*Schema, error) { - - b, err := Asset("v2/schema.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go deleted file mode 100644 index 44722ffd..00000000 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "strconv" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) -// together into one document. 
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// JSONLookup look up a value by the json property name -func (s Swagger) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) - return r, err -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -// GobEncode provides a safe gob encoder for Swagger, including extensions -func (s Swagger) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw := struct { - Props SwaggerProps - Ext VendorExtensible - }{ - Props: s.SwaggerProps, - Ext: s.VendorExtensible, - } - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Swagger, including extensions -func (s *Swagger) GobDecode(b []byte) error { - var raw struct { - Props SwaggerProps - Ext VendorExtensible - } - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - s.SwaggerProps = raw.Props - s.VendorExtensible = raw.Ext - return nil -} - -// SwaggerProps captures the top-level properties of an Api specification -// -// NOTE: validation rules -// - the scheme, when present must be from [http, https, ws, wss] -// - BasePath must start with a leading "/" -// - Paths is required -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Paths *Paths `json:"paths"` - Definitions Definitions `json:"definitions,omitempty"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -type swaggerPropsAlias SwaggerProps - -type gobSwaggerPropsAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *swaggerPropsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements -func (o SwaggerProps) GobEncode() ([]byte, error) { - raw := gobSwaggerPropsAlias{ - Alias: (*swaggerPropsAlias)(&o), - } - - var b bytes.Buffer - if o.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(o.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) 
- return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(o.Security)) - for _, req := range o.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements -func (o *SwaggerProps) GobDecode(b []byte) error { - var raw gobSwaggerPropsAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) - } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *o = *(*SwaggerProps)(raw.Alias) - return nil -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { - if token == "allows" { - return s.Allows, nil - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - 
} - var nw SchemaOrStringArray - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { - if _, err := strconv.Atoi(token); err == nil { - r, _, err := jsonpointer.GetForToken(s.Schemas, token) - return r, err - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch v := single.(type) { - case string: - *s = StringOrArray([]string{v}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
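Both helper types above accept either JSON form when decoding, which a short sketch makes concrete (same assumed import):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	var typ spec.StringOrArray
	_ = json.Unmarshal([]byte(`"object"`), &typ)           // a single string becomes a one-element slice
	_ = json.Unmarshal([]byte(`["object", "null"]`), &typ) // arrays are taken as-is
	fmt.Println(typ.Contains("null")) // true

	var extra spec.SchemaOrBool
	_ = json.Unmarshal([]byte(`false`), &extra) // a bare boolean sets Allows and leaves Schema nil
	fmt.Println(extra.Allows, extra.Schema)     // false <nil>
}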
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} - -// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go deleted file mode 100644 index faa3d3de..00000000 --- a/vendor/github.com/go-openapi/spec/tag.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// TagProps describe a tag entry in the top level tags section of a swagger spec -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// NewTag creates a new tag -func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} -} - -// Tag allows adding meta data to a single tag that is used by the -// [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (t Tag) JSONLookup(token string) (interface{}, error) { - if ex, ok := t.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(t.TagProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go deleted file mode 100644 index aa12b56f..00000000 --- a/vendor/github.com/go-openapi/spec/unused.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -/* - -import ( - "net/url" - "os" - "path" - "path/filepath" - - "github.com/go-openapi/jsonpointer" -) - - // Some currently unused functions and definitions that - // used to be part of the expander. 
- - // Moved here for the record and possible future reuse - -var ( - idPtr, _ = jsonpointer.New("/id") - refPtr, _ = jsonpointer.New("/$ref") -) - -func idFromNode(node interface{}) (*Ref, error) { - if idValue, _, err := idPtr.Get(node); err == nil { - if refStr, ok := idValue.(string); ok && refStr != "" { - idRef, err := NewRef(refStr) - if err != nil { - return nil, err - } - return &idRef, nil - } - } - return nil, nil -} - -func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { - if startingRef == nil { - return nil - } - - if ptr == nil { - return startingRef - } - - ret := startingRef - var idRef *Ref - node := startingNode - - for _, tok := range ptr.DecodedTokens() { - node, _, _ = jsonpointer.GetForToken(node, tok) - if node == nil { - break - } - - idRef, _ = idFromNode(node) - if idRef != nil { - nw, err := ret.Inherits(*idRef) - if err != nil { - break - } - ret = nw - } - - refRef, _, _ := refPtr.Get(node) - if refRef != nil { - var rf Ref - switch value := refRef.(type) { - case string: - rf, _ = NewRef(value) - } - nw, err := ret.Inherits(rf) - if err != nil { - break - } - nwURL := nw.GetURL() - if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { - nwpt := filepath.ToSlash(nwURL.Path) - if filepath.IsAbs(nwpt) { - _, err := os.Stat(nwpt) - if err != nil { - nwURL.Path = filepath.Join(".", nwpt) - } - } - } - - ret = nw - } - - } - - return ret -} - -// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID -func basePathFromSchemaID(oldBasePath, id string) string { - u, err := url.Parse(oldBasePath) - if err != nil { - panic(err) - } - uid, err := url.Parse(id) - if err != nil { - panic(err) - } - - if path.IsAbs(uid.Path) { - return id - } - u.Path = path.Join(path.Dir(u.Path), uid.Path) - return u.String() -} -*/ - -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps) -// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go deleted file mode 100644 index 945a4670..00000000 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// XMLObject a metadata object that allows for more fine-tuned XML model definitions. -// -// For more information: http://goo.gl/8us55a#xmlObject -type XMLObject struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Prefix string `json:"prefix,omitempty"` - Attribute bool `json:"attribute,omitempty"` - Wrapped bool `json:"wrapped,omitempty"` -} - -// WithName sets the xml name for the object -func (x *XMLObject) WithName(name string) *XMLObject { - x.Name = name - return x -} - -// WithNamespace sets the xml namespace for the object -func (x *XMLObject) WithNamespace(namespace string) *XMLObject { - x.Namespace = namespace - return x -} - -// WithPrefix sets the xml prefix for the object -func (x *XMLObject) WithPrefix(prefix string) *XMLObject { - x.Prefix = prefix - return x -} - -// AsAttribute flags this object as xml attribute -func (x *XMLObject) AsAttribute() *XMLObject { - x.Attribute = true - return x -} - -// AsElement flags this object as an xml node -func (x *XMLObject) AsElement() *XMLObject { - x.Attribute = false - return x -} - -// AsWrapped flags this object as wrapped, this is mostly useful for array types -func (x *XMLObject) AsWrapped() *XMLObject { - x.Wrapped = true - return x -} - -// AsUnwrapped flags this object as an xml node -func (x *XMLObject) AsUnwrapped() *XMLObject { - x.Wrapped = false - return x -} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/strfmt/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore deleted file mode 100644 index dd91ed6a..00000000 --- a/vendor/github.com/go-openapi/strfmt/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml deleted file mode 100644 index f260ce7e..00000000 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 31 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals - -issues: - exclude-rules: - - path: bson.go - text: 
"should be .*ObjectID" - linters: - - golint - diff --git a/vendor/github.com/go-openapi/strfmt/.travis.yml b/vendor/github.com/go-openapi/strfmt/.travis.yml deleted file mode 100644 index eb962aeb..00000000 --- a/vendor/github.com/go-openapi/strfmt/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -env: -- GO111MODULE=on -notifications: - slack: - secure: zE5AtIYTpYfQPnTzP+EaQPN7JKtfFAGv6PrJqoIZLOXa8B6zGb6+J1JRNNxWi7faWbyJOxa4FSSsuPsKZMycUK6wlLFIdhDxwqeo7Ew8r6rdZKdfUHQggfNS9wO79ARoNYUDHtmnaBUS+eWSM1YqSc4i99QxyyfuURLOeAaA/q14YbdlTlaw3lrZ0qT92ot1FnVGNOx064zuHtFeUf+jAVRMZ6Q3rvqllwIlPszE6rmHGXBt2VoJxRaBetdwd7FgkcYw9FPXKHhadwC7/75ZAdmxIukhxNMw4Tr5NuPcqNcnbYLenDP7B3lssGVIrP4BRSqekS1d/tqvdvnnFWHMwrNCkSnSc065G5+qWTlXKAemIclgiXXqE2furBNLm05MDdG8fn5epS0UNarkjD+zX336RiqwBlOX4KbF+vPyqcO98CsN0lnd+H6loc9reiTHs37orFFpQ+309av9be2GGsHUsRB9ssIyrewmhAccOmkRtr2dVTZJNFQwa5Kph5TNJuTjnZEwG/xUkEX2YSfwShOsb062JWiflV6PJdnl80pc9Tn7D5sO5Bf9DbijGRJwwP+YiiJtwtr+vsvS+n4sM0b5eqm4UoRo+JJO8ffoJtHS7ItuyRbVQCwEPJ4221WLcf5PquEEDdAPwR+K4Gj8qTXqTDdxOiES1xFUKVgmzhI= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. 
- -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/strfmt/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md deleted file mode 100644 index 87357cd0..00000000 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) - -This package exposes a registry of data types to support string formats in the go-openapi toolkit. - -strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. - -## Supported data formats -go-openapi/strfmt follows the swagger 2.0 specification with the following formats -defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). - -It also provides convenient extensions to go-openapi users. - -- [x] JSON-schema draft 4 formats - - date-time - - email - - hostname - - ipv4 - - ipv6 - - uri -- [x] swagger 2.0 format extensions - - binary - - byte (e.g. base64 encoded string) - - date (e.g. "1970-01-01") - - password -- [x] go-openapi custom format extensions - - bsonobjectid (BSON objectID) - - creditcard - - duration (e.g. "3 weeks", "1ms") - - hexcolor (e.g. "#FFFFFF") - - isbn, isbn10, isbn13 - - mac (e.g "01:02:03:04:05:06") - - rgbcolor (e.g. "rgb(100,100,100)") - - ssn - - uuid, uuid3, uuid4, uuid5 - - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") - -> NOTE: as the name stands for, this package is intended to support string formatting only. -> It does not provide validation for numerical values with swagger format extension for JSON types "number" or -> "integer" (e.g. float, double, int32...). - -## Format types -Types defined in strfmt expose marshaling and validation capabilities. - -List of defined types: -- Base64 -- CreditCard -- Date -- DateTime -- Duration -- Email -- HexColor -- Hostname -- IPv4 -- IPv6 -- CIDR -- ISBN -- ISBN10 -- ISBN13 -- MAC -- ObjectId -- Password -- RGBColor -- SSN -- URI -- UUID -- UUID3 -- UUID4 -- UUID5 diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go deleted file mode 100644 index c1496122..00000000 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "fmt" - - "go.mongodb.org/mongo-driver/bson" - - "go.mongodb.org/mongo-driver/bson/bsontype" - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" -) - -func init() { - var id ObjectId - // register this format in the default registry - Default.Add("bsonobjectid", &id, IsBSONObjectID) -} - -// IsBSONObjectID returns true when the string is a valid BSON.ObjectId -func IsBSONObjectID(str string) bool { - _, err := bsonprim.ObjectIDFromHex(str) - return err == nil -} - -// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) -// -// swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID - -// NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { - oid, err := bsonprim.ObjectIDFromHex(hex) - if err != nil { - panic(err) - } - return ObjectId(oid) -} - -// MarshalText turns this instance into text -func (id ObjectId) MarshalText() ([]byte, error) { - oid := bsonprim.ObjectID(id) - if oid == bsonprim.NilObjectID { - return nil, nil - } - return []byte(oid.Hex()), nil -} - -// UnmarshalText hydrates this instance from text -func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on - if len(data) == 0 { - *id = ObjectId(bsonprim.NilObjectID) - return nil - } - oidstr := string(data) - oid, err := bsonprim.ObjectIDFromHex(oidstr) - if err != nil { - return err - } - *id = ObjectId(oid) - return nil -} - -// Scan read a value from a database driver -func (id *ObjectId) Scan(raw interface{}) error { - var data []byte - switch v := raw.(type) { - case []byte: - data = v - case string: - data = []byte(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) - } - - return id.UnmarshalText(data) -} - -// Value converts a value to a database driver value -func (id ObjectId) Value() (driver.Value, error) { - return driver.Value(bsonprim.ObjectID(id).Hex()), nil -} - -func (id ObjectId) String() string { - return bsonprim.ObjectID(id).String() -} - -// MarshalJSON returns the ObjectId as JSON -func (id ObjectId) MarshalJSON() ([]byte, error) { - return bsonprim.ObjectID(id).MarshalJSON() -} - -// UnmarshalJSON sets the ObjectId from JSON -func (id *ObjectId) UnmarshalJSON(data []byte) error { - var obj bsonprim.ObjectID - if err := obj.UnmarshalJSON(data); err != nil { - return err - } - *id = ObjectId(obj) - return nil -} - -// MarshalBSON renders the object id as a BSON document -func (id ObjectId) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) -} - -// UnmarshalBSON reads the objectId from a BSON document -func (id *ObjectId) UnmarshalBSON(data []byte) error { - var obj struct { - Data bsonprim.ObjectID - } - if err := bson.Unmarshal(data, &obj); err != nil { - return err - } - *id = ObjectId(obj.Data) - return nil -} - -// MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { - oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil -} - -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. 
UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - var oid bsonprim.ObjectID - copy(oid[:], data) - *id = ObjectId(oid) - return nil -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (id *ObjectId) DeepCopyInto(out *ObjectId) { - *out = *id -} - -// DeepCopy copies the receiver into a new ObjectId. -func (id *ObjectId) DeepCopy() *ObjectId { - if id == nil { - return nil - } - out := new(ObjectId) - id.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go deleted file mode 100644 index 2959e657..00000000 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "time" - - "go.mongodb.org/mongo-driver/bson" -) - -func init() { - d := Date{} - // register this format in the default registry - Default.Add("date", &d, IsDate) -} - -// IsDate returns true when the string is a valid date -func IsDate(str string) bool { - _, err := time.Parse(RFC3339FullDate, str) - return err == nil -} - -const ( - // RFC3339FullDate represents a full-date as specified by RFC3339 - // See: http://goo.gl/xXOvVd - RFC3339FullDate = "2006-01-02" -) - -// Date represents a date from the API -// -// swagger:strfmt date -type Date time.Time - -// String converts this date into a string -func (d Date) String() string { - return time.Time(d).Format(RFC3339FullDate) -} - -// UnmarshalText parses a text representation into a date type -func (d *Date) UnmarshalText(text []byte) error { - if len(text) == 0 { - return nil - } - dd, err := time.Parse(RFC3339FullDate, string(text)) - if err != nil { - return err - } - *d = Date(dd) - return nil -} - -// MarshalText serializes this date type to string -func (d Date) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// Scan scans a Date value from database driver type. -func (d *Date) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - return d.UnmarshalText(v) - case string: - return d.UnmarshalText([]byte(v)) - case time.Time: - *d = Date(v) - return nil - case nil: - *d = Date{} - return nil - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v) - } -} - -// Value converts Date to a primitive value ready to written to a database. 
-func (d Date) Value() (driver.Value, error) { - return driver.Value(d.String()), nil -} - -// MarshalJSON returns the Date as JSON -func (d Date) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Time(d).Format(RFC3339FullDate)) -} - -// UnmarshalJSON sets the Date from JSON -func (d *Date) UnmarshalJSON(data []byte) error { - if string(data) == jsonNull { - return nil - } - var strdate string - if err := json.Unmarshal(data, &strdate); err != nil { - return err - } - tt, err := time.Parse(RFC3339FullDate, strdate) - if err != nil { - return err - } - *d = Date(tt) - return nil -} - -func (d Date) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) -} - -func (d *Date) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { - return err - } - - if data, ok := m["data"].(string); ok { - rd, err := time.Parse(RFC3339FullDate, data) - if err != nil { - return err - } - *d = Date(rd) - return nil - } - - return errors.New("couldn't unmarshal bson bytes value as Date") -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (d *Date) DeepCopyInto(out *Date) { - *out = *d -} - -// DeepCopy copies the receiver into a new Date. -func (d *Date) DeepCopy() *Date { - if d == nil { - return nil - } - out := new(Date) - d.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go deleted file mode 100644 index a89a4de3..00000000 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ /dev/null @@ -1,2035 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/mail" - "regexp" - "strings" - - "github.com/asaskevich/govalidator" - "go.mongodb.org/mongo-driver/bson" -) - -const ( - // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114 - // A string instance is valid against this attribute if it is a valid - // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // http://tools.ietf.org/html/rfc1034#section-3.5 - // <digit> ::= any one of the ten digits 0 through 9 - // var digit = /[0-9]/; - // <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case - // var letter = /[a-zA-Z]/; - // <let-dig> ::= <letter> | <digit> - // var letDig = /[0-9a-zA-Z]/; - // <let-dig-hyp> ::= <let-dig> | "-" - // var letDigHyp = /[-0-9a-zA-Z]/; - // <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str> - // var ldhStr = /[-0-9a-zA-Z]+/; - //