diff --git a/.gitignore b/.gitignore index f76be949f..00b4b8a4d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ .generate_exes .get_deps bin/ +**/.test/ diff --git a/Gopkg.lock b/Gopkg.lock index e294f0f7a..81f017b44 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -97,6 +97,12 @@ packages = ["."] revision = "944e07253867aacae43c04b2e6a239005443f33a" +[[projects]] + name = "github.com/fsnotify/fsnotify" + packages = ["."] + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + [[projects]] name = "github.com/ghodss/yaml" packages = ["."] @@ -193,6 +199,12 @@ packages = [".","simplelru"] revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" +[[projects]] + branch = "master" + name = "github.com/hashicorp/hcl" + packages = [".","hcl/ast","hcl/parser","hcl/printer","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"] + revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" + [[projects]] branch = "master" name = "github.com/howeyc/gopass" @@ -235,6 +247,12 @@ packages = ["."] revision = "7cafcd837844e784b526369c9bce262804aebc60" +[[projects]] + name = "github.com/magiconair/properties" + packages = ["."] + revision = "c2353362d570a7bfa228149c62842019201cfb71" + version = "v1.8.0" + [[projects]] branch = "master" name = "github.com/mailru/easyjson" @@ -247,18 +265,36 @@ revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" version = "v1.0.0" +[[projects]] + branch = "master" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" + [[projects]] branch = "master" name = "github.com/mxk/go-flowrate" packages = ["flowrate"] revision = "cca7078d478f8520f85629ad7c68962d31ed7682" +[[projects]] + name = "github.com/onsi/gomega" + packages = [".","format","gbytes","gexec","internal/assertion","internal/asyncassertion","internal/oraclematcher","internal/testingtsupport","matchers","matchers/support/goraph/bipartitegraph","matchers/support/goraph/edge","matchers/support/goraph/node","matchers/support/goraph/util","types"] + revision = "62bff4df71bdbc266561a0caee19f0594b17c240" + version = "v1.4.0" + [[projects]] name = "github.com/pborman/uuid" packages = ["."] revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" version = "v1.1" +[[projects]] + name = "github.com/pelletier/go-toml" + packages = ["."] + revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8" + version = "v1.1.0" + [[projects]] branch = "master" name = "github.com/petar/GoLLRB" @@ -277,6 +313,12 @@ revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + [[projects]] name = "github.com/prometheus/client_golang" packages = ["prometheus"] @@ -301,18 +343,54 @@ packages = [".","xfs"] revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2" +[[projects]] + name = "github.com/spf13/afero" + packages = [".","mem"] + revision = "63644898a8da0bc22138abf860edaf5277b6102e" + version = "v1.1.0" + +[[projects]] + name = "github.com/spf13/cast" + packages = ["."] + revision = "8965335b8c7107321228e3e3702cab9832751bac" + version = "v1.2.0" + [[projects]] branch = "master" name = "github.com/spf13/cobra" packages = ["."] revision = "b78744579491c1ceeaaa3b40205e56b0591b93a3" +[[projects]] + branch = "master" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" + [[projects]] name = 
"github.com/spf13/pflag" packages = ["."] revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" version = "v1.0.0" +[[projects]] + name = "github.com/spf13/viper" + packages = ["."] + revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" + version = "v1.0.2" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert","require"] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" + +[[projects]] + branch = "master" + name = "github.com/termie/go-shutil" + packages = ["."] + revision = "bcacb06fecaeec8dc42af03c87c6949f4a05c74c" + [[projects]] branch = "master" name = "github.com/ugorji/go" @@ -328,7 +406,7 @@ [[projects]] branch = "master" name = "golang.org/x/net" - packages = ["context","context/ctxhttp","html","html/atom","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace","websocket"] + packages = ["context","context/ctxhttp","html","html/atom","html/charset","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace","websocket"] revision = "66aacef3dd8a676686c7ae3716979581e8b03c47" [[projects]] @@ -346,7 +424,7 @@ [[projects]] branch = "master" name = "golang.org/x/text" - packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"] + packages = ["encoding","encoding/charmap","encoding/htmlindex","encoding/internal","encoding/internal/identifier","encoding/japanese","encoding/korean","encoding/simplifiedchinese","encoding/traditionalchinese","encoding/unicode","internal/gen","internal/tag","internal/triegen","internal/ucd","internal/utf8internal","language","runes","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"] revision = "bd91bbf73e9a4a801adbfb97133c992678533126" [[projects]] @@ -426,9 +504,15 @@ packages = ["pkg/builder","pkg/common","pkg/handler","pkg/util","pkg/util/proto"] revision = "76a6671bcfb1d229fc7fd543aee23ddff6b5fc7e" +[[projects]] + branch = "master" + name = "sigs.k8s.io/testing_frameworks" + packages = ["integration","integration/internal"] + revision = "e71b0a734afabc881ad4a1b45837b6e4ea63deb5" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "a0e23f0f70d531bf4dd93e9d79df69c5913b70a7c3d016915511e24d78dd5754" + inputs-digest = "9a80a4b8a6eaf95b4367c6121011c698d542b1aef6df1c133bd6defa04f8567c" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Makefile b/Makefile index 29d918a9a..912d88c95 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,9 @@ CMDS := controller apiserver pilot-elasticsearch pilot-cassandra GOPATH ?= /tmp/go GOFLAGS ?= "-a" +# Path of the Navigator Cassandra Pilot for integration tests +TEST_ASSET_NAVIGATOR_PILOT_CASSANDRA ?= "${CURDIR}/navigator-pilot-cassandra_linux_amd64" + help: # all - runs verify, build and docker_build targets # test - runs go_test target @@ -32,7 +35,7 @@ help: all: verify build docker_build -test: go_test +test: go_test test_integration .run_e2e: ${HACK_DIR}/prepare-e2e.sh @@ -44,7 +47,7 @@ build: $(CMDS) generate: .generate_files -verify: .hack_verify dep_verify go_verify helm_verify +verify: .hack_verify dep_verify go_verify helm_verify test_integration .hack_verify: @echo Running repo-infra verify scripts @@ -90,7 +93,7 @@ $(CMDS): go_build: $(CMDS) go_test: - go test -v $$(go list ./... | grep -v '/vendor/') + go test -v $$(go list ./... 
| grep -v -e '/vendor/' -e 'github.com/jetstack/navigator/test/') go_fmt: ./hack/verify-lint.sh @@ -105,3 +108,11 @@ go_fmt: # Helm targets helm_verify: helm lint contrib/charts/* + +.download_integration_test_binaries: + $(HACK_DIR)/download-integration-test-binaries.sh + touch .download_integration_test_binaries + +test_integration: .download_integration_test_binaries apiserver pilot-cassandra + TEST_ASSET_NAVIGATOR_PILOT_CASSANDRA=$(TEST_ASSET_NAVIGATOR_PILOT_CASSANDRA) \ + go test $(GO_TEST_ARGS) -v ./test/integration/... diff --git a/docs/cassandra.rst b/docs/cassandra.rst index 179f605c1..fced978ed 100644 --- a/docs/cassandra.rst +++ b/docs/cassandra.rst @@ -25,6 +25,8 @@ All the C* nodes (pods) in a ``nodepool`` have the same configuration and the fo .. include:: configure-scheduler.rst +.. _availability-zones-cassandra: + Cassandra Across Multiple Availability Zones -------------------------------------------- @@ -240,6 +242,37 @@ Navigator will add C* nodes, one at a time, until the desired number of nodes is You can look at ``CassandraCluster.Status.NodePools[].ReadyReplicas`` to see the current number of healthy C* nodes in each ``nodepool``. +Pilots and Cassandra Docker Images +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, Navigator will use the Cassandra Docker images from DockerHub, choosing the image whose tag matches the supplied ``CassandraCluster.Spec.Version`` field. +If you prefer to use your own container image, configure the ``CassandraCluster.Spec.Image`` fields. + +Navigator installs a ``navigator-pilot-cassandra`` executable into each Pod at the path ``/pilot``. +This ``pilot`` process connects to the API server to get extra configuration settings, to report the status of this C* node, and to perform leader election of a single pilot in the cluster. + +The ``pilot`` overrides the following keys in the default ``/etc/cassandra/cassandra.yaml`` file: + +* ``cluster_name``: This will be set to match the name of the corresponding ``CassandraCluster`` resource in the API server. +* ``listen_address`` / ``listen_interface`` / ``broadcast_address`` / ``rpc_address`` / ``broadcast_rpc_address``: These keys will be set to ``null``. This ensures that the Cassandra process listens and communicates using the IP address currently associated with the fully qualified domain name of the Pod, which is important if the Pod is moved to another node and is assigned a different IP address. Removing these settings from the configuration file ensures that Cassandra uses the most recent IP address that Kubernetes has assigned to the Pod, and that other C* nodes in the cluster are notified of the change of IP address. +* ``seed_provider``: This is set to ``io.jetstack.cassandra.KubernetesSeedProvider``, which allows Cassandra to look up the seed IP addresses from a Kubernetes service. The ``seed_provider.*.seeds`` sub-key will be emptied. This avoids the risk of nodes mistakenly joining the cluster as seeds if the seed provider service is temporarily unavailable. + +The ``pilot`` also overwrites ``cassandra-rackdc.properties`` with values derived from ``CassandraCluster.Spec.NodePools`` (see :ref:`availability-zones-cassandra`). + +Finally, the ``pilot`` executes ``/usr/sbin/cassandra`` directly. + +.. note:: + The default entry point of the image (e.g. ``/docker-entrypoint.sh``) is ignored.
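+ +For illustration, the affected keys of ``cassandra.yaml`` end up looking roughly like this after the ``pilot`` has rewritten the file (a sketch of the overrides listed above; ``my-cluster`` is a stand-in for the ``CassandraCluster`` name):: + + cluster_name: my-cluster + listen_address: null + listen_interface: null + broadcast_address: null + rpc_address: null + broadcast_rpc_address: null + endpoint_snitch: GossipingPropertyFileSnitch + seed_provider: + - class_name: io.jetstack.cassandra.KubernetesSeedProvider + parameters: + - seeds: ""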
+ Supported Versions ------------------ diff --git a/docs/quick-start/cassandra-cluster.yaml b/docs/quick-start/cassandra-cluster.yaml index 29ca9bdfa..20a9a7763 100644 --- a/docs/quick-start/cassandra-cluster.yaml +++ b/docs/quick-start/cassandra-cluster.yaml @@ -31,3 +31,5 @@ spec: pilotImage: repository: "quay.io/jetstack/navigator-pilot-cassandra" tag: "v0.1.0" + securityContext: + runAsUser: 999 diff --git a/hack/download-integration-test-binaries.sh b/hack/download-integration-test-binaries.sh new file mode 100755 index 000000000..c45d19553 --- /dev/null +++ b/hack/download-integration-test-binaries.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# +# Download the binaries needed by the sigs.k8s.io/testing_frameworks/integration package. +# etcd, kube-apiserver, kubectl +# XXX: There is already a script to do this: +# * sigs.k8s.io/testing_frameworks/integration/scripts/download-binaries.sh +# But it currently downloads kube-apiserver v1.10.0-alpha.1 which doesn't support ``CustomResourceSubresources``. +# See https://github.com/kubernetes-sigs/testing_frameworks/issues/44 + +set -o errexit +set -o nounset +set -o pipefail +set -o xtrace + +# Close stdin +exec 0<&- + +ROOT_DIR="$(git rev-parse --show-toplevel)" + +ETCD_VERSION=v3.2.10 +ETCD_URL="https://storage.googleapis.com/etcd" + +KUBE_VERSION_URL="https://storage.googleapis.com/kubernetes-release/release/stable-1.10.txt" +KUBE_VERSION=$(curl --fail --silent "${KUBE_VERSION_URL}") +KUBE_BIN_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64" + +ASSETS_DIR="${ROOT_DIR}/vendor/sigs.k8s.io/testing_frameworks/integration/assets/bin" + +mkdir -p "${ASSETS_DIR}" + +curl --fail --silent ${ETCD_URL}/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | \ + tar --extract --gzip --directory="${ASSETS_DIR}" --strip-components=1 --wildcards '*/etcd' +curl --fail --silent --output "${ASSETS_DIR}/kube-apiserver" "${KUBE_BIN_URL}/kube-apiserver" +curl --fail --silent --output "${ASSETS_DIR}/kubectl" "${KUBE_BIN_URL}/kubectl" + +chmod +x ${ASSETS_DIR}/* diff --git a/hack/libe2e.sh b/hack/libe2e.sh index 0ca633ed4..aa8073d03 100644 --- a/hack/libe2e.sh +++ b/hack/libe2e.sh @@ -131,11 +131,12 @@ function signal_cassandra_process() { local pod="${2}" local signal="${3}" - # Send STOP signal to all the cassandra user's processes + # Send a signal to the Cassandra Daemon process. 
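+ # ps prints the PID and command line of every visible process; the awk pattern matches only the line whose command ends in org.apache.cassandra.service.CassandraDaemon (the JVM), so the signal goes to that single PID rather than to every process owned by the cassandra user.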
kubectl \ --namespace="${namespace}" \ exec "${pod}" -- \ - bash -c "kill -${signal}"' -- $(ps -u cassandra -o pid=) && ps faux' + bash -c \ + "kill -${signal}"' -- $(ps -o pid,cmd= | awk "/org.apache.cassandra.service.CassandraDaemon$/{print \$1}")' } function simulate_unresponsive_cassandra_process() { diff --git a/hack/testdata/cass-cluster-test.template.yaml b/hack/testdata/cass-cluster-test.template.yaml index 39f300636..00518ccd1 100644 --- a/hack/testdata/cass-cluster-test.template.yaml +++ b/hack/testdata/cass-cluster-test.template.yaml @@ -24,3 +24,5 @@ spec: repository: "${NAVIGATOR_IMAGE_REPOSITORY}/navigator-pilot-cassandra" tag: "${NAVIGATOR_IMAGE_TAG}" pullPolicy: "${NAVIGATOR_IMAGE_PULLPOLICY}" + securityContext: + runAsUser: 999 diff --git a/internal/test/util/testfs/testfs.go b/internal/test/util/testfs/testfs.go new file mode 100644 index 000000000..3aef2e5a4 --- /dev/null +++ b/internal/test/util/testfs/testfs.go @@ -0,0 +1,53 @@ +package testfs + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +// TestFs provides a per-test scratch directory for test files. +type TestFs struct { + t *testing.T + d string +} + +// New returns a TestFs rooted at .test/<test name>, first removing anything left over from a previous run. +func New(t *testing.T) *TestFs { + d := fmt.Sprintf(".test/%s", t.Name()) + d, err := filepath.Abs(d) + require.NoError(t, err) + err = os.RemoveAll(d) + if err != nil && !os.IsNotExist(err) { + t.Fatalf("Error while removing old test directory: %s", err) + } + + err = os.MkdirAll(d, os.ModePerm) + require.NoError(t, err) + + return &TestFs{ + t: t, + d: d, + } +} + +// TempPath creates an empty file called name inside the test directory and returns its absolute path. +func (tfs *TestFs) TempPath(name string) string { + outPath := path.Join(tfs.d, name) + tmpFile, err := ioutil.TempFile(tfs.d, name) + require.NoError(tfs.t, err) + err = tmpFile.Close() + require.NoError(tfs.t, err) + err = os.Rename(tmpFile.Name(), outPath) + require.NoError(tfs.t, err) + return outPath +} + +// TempDir creates a subdirectory called name inside the test directory and returns its absolute path. +func (tfs *TestFs) TempDir(name string) string { + outPath := path.Join(tfs.d, name) + err := os.MkdirAll(outPath, os.ModePerm) + require.NoError(tfs.t, err) + return outPath +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 000000000..fa25dcc2d --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,84 @@ +package config + +// Package config provides an API with which Navigator components can read YAML +// or properties files and override certain keys before writing them back to the +// filesystem. +// +// This is currently a thin wrapper around ``viper``, which is already able to read and +// write files in multiple formats. +// The ``Interface`` has only the operations which are needed by Navigator, so +// we can swap viper for something else if necessary in the future.
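+// +// A minimal usage sketch (the path, key and value here are hypothetical): +// +// cfg, err := config.NewFromYaml("/etc/cassandra/cassandra.yaml") +// if err != nil { +// // handle the error +// } +// cfg.Set("cluster_name", "my-cluster") // override a key +// cfg.Unset("listen_address") // set a key to null +// err = cfg.WriteConfig() // back up the original, then write in place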
+import ( + "os" + "path/filepath" + + "github.com/pkg/errors" + "github.com/spf13/viper" + shutil "github.com/termie/go-shutil" +) + +const ( + BackupSuffix = ".navigator_original" +) + +type config struct { + *viper.Viper +} + +type Interface interface { + Get(key string) interface{} + Set(key string, value interface{}) + Unset(key string) + WriteConfig() error + AllSettings() map[string]interface{} +} + +var _ Interface = &config{} + +func newFrom(path, format string) (Interface, error) { + path, err := filepath.Abs(path) + if err != nil { + return nil, errors.Wrap(err, "unable to read absolute path") + } + c := &config{viper.New()} + c.SetConfigFile(path) + c.SetConfigType(format) + // A missing file is deliberately not an error: the configuration simply starts out empty. + f, err := os.Open(path) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "unable to open file") + } + defer f.Close() + err = c.ReadConfig(f) + if err != nil { + return nil, errors.Wrap(err, "unable to read file") + } + return c, nil +} + +// NewFromYaml reads a .yaml file. +func NewFromYaml(path string) (Interface, error) { + return newFrom(path, "yaml") +} + +// NewFromProperties reads a .properties file. +func NewFromProperties(path string) (Interface, error) { + return newFrom(path, "properties") +} + +// Unset sets the value of the supplied ``key`` to ``null``. +func (c *config) Unset(key string) { + var null *struct{} + c.Set(key, null) +} + +// WriteConfig writes the configuration to file using the format of the original file. +// A backup is made if the file already exists. +func (c *config) WriteConfig() error { + path := c.ConfigFileUsed() + backupPath := path + BackupSuffix + err := shutil.CopyFile(path, backupPath, false) + if err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "unable to create backup file") + } + return c.Viper.WriteConfig() +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go new file mode 100644 index 000000000..ef5658132 --- /dev/null +++ b/pkg/config/config_test.go @@ -0,0 +1,108 @@ +package config_test + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + shutil "github.com/termie/go-shutil" + + "github.com/jetstack/navigator/internal/test/util/testfs" + "github.com/jetstack/navigator/pkg/config" +) + +// PathExists returns true if the path exists (file, directory or symlink) +func PathExists(path string) (bool, error) { + _, err := os.Lstat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// AssertNotPathExists asserts that a path does not exist. +// This is missing from the testify library. +func AssertNotPathExists(t *testing.T, path string) { + exists, err := PathExists(path) + require.NoError(t, err) + if exists { + assert.Fail(t, "AssertNotPathExists", fmt.Sprintf("Path %q exists", path)) + } +} + +// TestUnset verifies that Unset sets the supplied key to null / nil +func TestUnset(t *testing.T) { + tfs := testfs.New(t) + inPath := tfs.TempPath("config.yaml") + err := shutil.CopyFile("testdata/config1.yaml", inPath, false) + require.NoError(t, err) + c1, err := config.NewFromYaml(inPath) + require.NoError(t, err) + assert.NotNil(t, c1.Get("a.b.c")) + c1.Unset("a.b.c") + err = c1.WriteConfig() + require.NoError(t, err) + c2, err := config.NewFromYaml(inPath) + require.NoError(t, err) + assert.Nil(t, c2.Get("a.b.c")) +} + +// TestRoundTrip verifies that the file written by WriteConfig can be re-read and results in identical settings.
+func TestRoundTrip(t *testing.T) { + tfs := testfs.New(t) + + inPath := tfs.TempPath("config.yaml") + err := shutil.CopyFile("testdata/config1.yaml", inPath, false) + require.NoError(t, err) + + c1, err := config.NewFromYaml(inPath) + require.NoError(t, err) + + err = c1.WriteConfig() + require.NoError(t, err) + + c2, err := config.NewFromYaml(inPath) + require.NoError(t, err) + + assert.Equal(t, c1.AllSettings(), c2.AllSettings()) +} + +// TestBackup verifies that backup files are created when an existing +// configuration file already exists. +func TestBackup(t *testing.T) { + t.Run( + "Backup file is created only when the config is written", + func(t *testing.T) { + tfs := testfs.New(t) + path := tfs.TempPath("conf1.yaml") + cfg, err := config.NewFromYaml(path) + require.NoError(t, err) + backupPath := path + config.BackupSuffix + AssertNotPathExists(t, backupPath) + err = cfg.WriteConfig() + require.NoError(t, err) + assert.FileExists(t, backupPath) + }, + ) + t.Run( + "Backup file is only created if a config file already exists", + func(t *testing.T) { + tfs := testfs.New(t) + dir := tfs.TempDir("conf") + // A non-existent filepath + path := dir + "/conf1.yaml" + cfg, err := config.NewFromYaml(path) + require.NoError(t, err) + backupPath := path + config.BackupSuffix + AssertNotPathExists(t, backupPath) + err = cfg.WriteConfig() + require.NoError(t, err) + AssertNotPathExists(t, backupPath) + }, + ) +} diff --git a/pkg/config/testdata/config1.yaml b/pkg/config/testdata/config1.yaml new file mode 100644 index 000000000..7be1fe9dc --- /dev/null +++ b/pkg/config/testdata/config1.yaml @@ -0,0 +1,5 @@ +a: + b: + c: + - c1 + - c2 diff --git a/pkg/controllers/cassandra/nodepool/resource.go b/pkg/controllers/cassandra/nodepool/resource.go index 1d6786447..d98fee3cb 100644 --- a/pkg/controllers/cassandra/nodepool/resource.go +++ b/pkg/controllers/cassandra/nodepool/resource.go @@ -23,8 +23,6 @@ const ( cassDataVolumeName = "cassandra-data" cassDataVolumeMountPath = "/var/lib/cassandra" - cassSnitch = "GossipingPropertyFileSnitch" - // See https://jolokia.org/reference/html/agents.html#jvm-agent jolokiaHost = "127.0.0.1" jolokiaPort = 8778 @@ -106,6 +104,9 @@ func StatefulSetForCluster( jolokiaPort, jolokiaContext, ), + "--cassandra-cluster-name", cluster.Name, + "--cassandra-rack", ptr.DerefString(np.Rack), + "--cassandra-dc", ptr.DerefString(np.Datacenter), }, Image: fmt.Sprintf( "%s:%s", @@ -196,38 +197,6 @@ func StatefulSetForCluster( Name: "HEAP_NEWSIZE", Value: "100M", }, - // Deliberately set to a single space to force Cassandra to do a host name lookup. - // See https://github.com/apache/cassandra/blob/cassandra-3.11.2/conf/cassandra.yaml#L592 - { - Name: "CASSANDRA_LISTEN_ADDRESS", - Value: " ", - }, - { - Name: "CASSANDRA_BROADCAST_ADDRESS", - Value: " ", - }, - { - Name: "CASSANDRA_RPC_ADDRESS", - Value: " ", - }, - // Set a non-existent default seed. - // The Kubernetes Seed Provider will fall back to a default seed host if it can't look up seeds via the CASSANDRA_SERVICE. - // And if the CASSANDRA_SEEDS environment variable is not set, it defaults to localhost. - // Which could cause confusion if a non-seed node is temporarily unable to lookup the seed nodes from the service. - // We want the list of seeds to be strictly controlled by the service. 
- // See: - // https://github.com/docker-library/cassandra/blame/master/3.11/docker-entrypoint.sh#L31 and - // https://github.com/apache/cassandra/blob/cassandra-3.11.2/conf/cassandra.yaml#L416 and - // https://github.com/kubernetes/examples/blob/cabf8b8e4739e576837111e156763d19a64a3591/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java#L69 and - // https://github.com/kubernetes/examples/blob/cabf8b8e4739e576837111e156763d19a64a3591/cassandra/go/main.go#L51 - { - Name: "CASSANDRA_SEEDS", - Value: "black-hole-dns-name", - }, - { - Name: "CASSANDRA_ENDPOINT_SNITCH", - Value: cassSnitch, - }, { Name: "CASSANDRA_SERVICE", Value: util.SeedsServiceName(cluster), @@ -236,14 +205,6 @@ Name: "CASSANDRA_CLUSTER_NAME", Value: cluster.Name, }, - { - Name: "CASSANDRA_DC", - Value: ptr.DerefString(np.Datacenter), - }, - { - Name: "CASSANDRA_RACK", - Value: ptr.DerefString(np.Rack), - }, { Name: "JVM_OPTS", Value: fmt.Sprintf( diff --git a/pkg/pilot/cassandra/v3/options.go b/pkg/pilot/cassandra/v3/options.go index bfd7d0a0c..60be493ac 100644 --- a/pkg/pilot/cassandra/v3/options.go +++ b/pkg/pilot/cassandra/v3/options.go @@ -18,9 +18,11 @@ import ( ) const ( - defaultResyncPeriod = time.Second * 30 - defaultConfigDir = "/etc/pilot" - defaultJolokiaURL = "http://127.0.0.1:8778/jolokia/" + defaultResyncPeriod = time.Second * 30 + defaultConfigDir = "/etc/pilot" + defaultJolokiaURL = "http://127.0.0.1:8778/jolokia/" + defaultCassandraPath = "/usr/sbin/cassandra" + defaultCassandraConfigPath = "/etc/cassandra" ) // PilotOptions are the options required to run this Pilot. This can be used to @@ -44,6 +46,21 @@ type PilotOptions struct { // JolokiaURL is the base URL of the Jolokia REST API server. JolokiaURL string + // CassandraClusterName is the cluster_name that will be written to cassandra.yaml + CassandraClusterName string + + // CassandraPath is the path to the cassandra executable + CassandraPath string + + // CassandraConfigPath is the path to the Cassandra configuration directory + CassandraConfigPath string + + // CassandraRack is the Cassandra rack name + CassandraRack string + + // CassandraDC is the Cassandra DC name + CassandraDC string + // GenericPilotOptions contains options for the genericpilot GenericPilotOptions *genericpilot.Options @@ -74,6 +91,11 @@ func (o *PilotOptions) AddFlags(flags *pflag.FlagSet) { flags.DurationVar(&o.ResyncPeriod, "resync-period", defaultResyncPeriod, "Re-sync period for control loops operated by the pilot") flags.StringVar(&o.ConfigDir, "config-dir", defaultConfigDir, "Base directory for additional Pilot configuration") flags.StringVar(&o.JolokiaURL, "jolokia-url", defaultJolokiaURL, "The base URL of the Jolokia REST API server") + flags.StringVar(&o.CassandraClusterName, "cassandra-cluster-name", "", "The cluster_name value that will be written to cassandra.yaml") + flags.StringVar(&o.CassandraPath, "cassandra-path", defaultCassandraPath, "The path to the cassandra executable") + flags.StringVar(&o.CassandraConfigPath, "cassandra-config-path", defaultCassandraConfigPath, "The path to the Cassandra configuration directory") + flags.StringVar(&o.CassandraRack, "cassandra-rack", "", "The Cassandra rack name.") + flags.StringVar(&o.CassandraDC, "cassandra-dc", "", "The Cassandra DC name.") } func (o *PilotOptions) Complete() error { diff --git a/pkg/pilot/cassandra/v3/pilot.go b/pkg/pilot/cassandra/v3/pilot.go index 817d9c0e9..c057f2608 100644 --- a/pkg/pilot/cassandra/v3/pilot.go +++ b/pkg/pilot/cassandra/v3/pilot.go @@ -2,23 
+2,32 @@ package v3 import ( "fmt" - "io/ioutil" "os" "os/exec" - "strings" + "path" "k8s.io/client-go/tools/cache" "github.com/golang/glog" + "github.com/pkg/errors" "github.com/jetstack/navigator/pkg/apis/navigator/v1alpha1" "github.com/jetstack/navigator/pkg/cassandra/nodetool" clientset "github.com/jetstack/navigator/pkg/client/clientset/versioned" listersv1alpha1 "github.com/jetstack/navigator/pkg/client/listers/navigator/v1alpha1" + "github.com/jetstack/navigator/pkg/config" "github.com/jetstack/navigator/pkg/pilot/genericpilot" "github.com/jetstack/navigator/pkg/pilot/genericpilot/hook" ) +const ( + CassSnitch = "GossipingPropertyFileSnitch" + CassSeedProvider = "io.jetstack.cassandra.KubernetesSeedProvider" + + CassandraYaml = "cassandra.yaml" + CassandraRackDcProperties = "cassandra-rackdc.properties" +) + type Pilot struct { Options *PilotOptions @@ -42,24 +51,6 @@ func NewPilot(opts *PilotOptions) (*Pilot, error) { pilotInformerSynced: pilotInformer.Informer().HasSynced, nodeTool: opts.nodeTool, } - - // hack to test the seedprovider, this should use whatever pattern is decided upon here: - // https://github.com/jetstack/navigator/issues/251 - cfgPath := "/etc/cassandra/cassandra.yaml" - read, err := ioutil.ReadFile(cfgPath) - if err != nil { - return nil, err - } - - newContents := strings.Replace(string(read), - "org.apache.cassandra.locator.SimpleSeedProvider", - "io.jetstack.cassandra.KubernetesSeedProvider", -1) - - err = ioutil.WriteFile(cfgPath, []byte(newContents), 0) - if err != nil { - return nil, err - } - return p, nil } @@ -71,11 +62,15 @@ func (p *Pilot) WaitForCacheSync(stopCh <-chan struct{}) error { } func (p *Pilot) Hooks() *hook.Hooks { - return &hook.Hooks{} + return &hook.Hooks{ + PreStart: []hook.Interface{ + hook.New("WriteConfig", p.WriteConfig), + }, + } } func (p *Pilot) CmdFunc(pilot *v1alpha1.Pilot) (*exec.Cmd, error) { - cmd := exec.Command("/docker-entrypoint.sh") + cmd := exec.Command(p.Options.CassandraPath, "-f") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd, nil @@ -129,3 +124,68 @@ func (p *Pilot) LivenessCheck() error { _, err := p.nodeTool.Status() return err } + +func (p *Pilot) writeCassandraYaml(pilot *v1alpha1.Pilot) error { + cfg, err := config.NewFromYaml( + path.Join(p.Options.CassandraConfigPath, "cassandra.yaml"), + ) + if err != nil { + return err + } + cfg.Set("cluster_name", p.Options.CassandraClusterName) + // We unset any pre-configured IP / host addresses so that Cassandra is + // forced to look up its FQDN and the corresponding IP address. + // This allows Cassandra to handle changes of IP address if Pods are restarted. + cfg.Unset("listen_address") + cfg.Unset("listen_interface") + cfg.Unset("broadcast_address") + cfg.Unset("rpc_address") + // Force the use of GossipingPropertyFileSnitch so that Cassandra will query the + // rackdc properties file for rack and DC values. + cfg.Set("endpoint_snitch", CassSnitch) + // Force the use of the Kubernetes seed provider, + // and remove preconfigured seeds so that Cassandra can only get seed + // information from the Kubernetes service. 
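+ // Based on the Set call below, the resulting cassandra.yaml fragment is + // expected to look like this (see TestWriteConfig): + // + // seed_provider: + // - class_name: io.jetstack.cassandra.KubernetesSeedProvider + // parameters: + // - seeds: ""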
+ cfg.Set( + "seed_provider", + []map[string]interface{}{{ + "class_name": CassSeedProvider, + "parameters": []interface{}{ + map[string]interface{}{ + "seeds": "", + }, + }, + }}, + ) + return cfg.WriteConfig() +} + +func (p *Pilot) writeCassandraRackDcProperties(pilot *v1alpha1.Pilot) error { + cfg, err := config.NewFromProperties( + path.Join(p.Options.CassandraConfigPath, "cassandra-rackdc.properties"), + ) + if err != nil { + return err + } + cfg.Set("rack", p.Options.CassandraRack) + cfg.Set("dc", p.Options.CassandraDC) + return cfg.WriteConfig() +} + +// WriteConfig creates Navigator-compatible Cassandra configuration files. +// It reads the default cassandra.yaml and cassandra-rackdc.properties files +// and overrides configuration values that are vital for Navigator to manage +// the Cassandra cluster. +func (p *Pilot) WriteConfig(pilot *v1alpha1.Pilot) (err error) { + err = p.writeCassandraYaml(pilot) + if err != nil { + return errors.Wrap(err, "unable to write cassandra.yaml") + } + + err = p.writeCassandraRackDcProperties(pilot) + if err != nil { + return errors.Wrap(err, "unable to write cassandra-rackdc.properties") + } + + return nil +} diff --git a/pkg/pilot/cassandra/v3/pilot_test.go b/pkg/pilot/cassandra/v3/pilot_test.go index 591f8944c..ba60b5916 100644 --- a/pkg/pilot/cassandra/v3/pilot_test.go +++ b/pkg/pilot/cassandra/v3/pilot_test.go @@ -3,10 +3,16 @@ package v3 import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + shutil "github.com/termie/go-shutil" + + "github.com/jetstack/navigator/internal/test/util/testfs" "github.com/jetstack/navigator/pkg/api/version" "github.com/jetstack/navigator/pkg/apis/navigator/v1alpha1" "github.com/jetstack/navigator/pkg/cassandra/nodetool" fakenodetool "github.com/jetstack/navigator/pkg/cassandra/nodetool/fake" + "github.com/jetstack/navigator/pkg/config" ) func TestPilotSyncFunc(t *testing.T) { @@ -87,3 +93,72 @@ ) } } + +func TestWriteConfig(t *testing.T) { + t.Run( + "create cassandra.yaml", + func(t *testing.T) { + tfs := testfs.New(t) + etc := tfs.TempDir("etc") + cassConfigPath := etc + "/cassandra" + err := shutil.CopyTree("testdata", cassConfigPath, nil) + require.NoError(t, err) + + pilotResource := &v1alpha1.Pilot{} + p := &Pilot{ + Options: &PilotOptions{ + CassandraConfigPath: cassConfigPath, + }, + } + + err = p.WriteConfig(pilotResource) + require.NoError(t, err) + cassandraYaml := cassConfigPath + "/cassandra.yaml" + cfg, err := config.NewFromYaml(cassandraYaml) + require.NoError(t, err) + assert.Nil(t, cfg.Get("listen_address")) + assert.Nil(t, cfg.Get("listen_interface")) + assert.Nil(t, cfg.Get("broadcast_address")) + assert.Nil(t, cfg.Get("rpc_address")) + assert.Equal(t, CassSnitch, cfg.Get("endpoint_snitch")) + seedProviders := cfg.Get("seed_provider").([]interface{}) + assert.Len(t, seedProviders, 1) + seedProvider := seedProviders[0].(map[interface{}]interface{}) + assert.Equal(t, CassSeedProvider, seedProvider["class_name"]) + parameters := seedProvider["parameters"].([]interface{}) + assert.Len(t, parameters, 1) + parameter := parameters[0].(map[interface{}]interface{}) + assert.Equal(t, "", parameter["seeds"]) + }, + ) + t.Run( + "create cassandra-rackdc.properties", + func(t *testing.T) { + tfs := testfs.New(t) + etc := tfs.TempDir("etc") + cassConfigPath := etc + "/cassandra" + err := shutil.CopyTree("testdata", cassConfigPath, nil) + require.NoError(t, err) + + expectedRack := "rack-name-foo" + expectedDC := "dc-bar" + + 
pilotResource := &v1alpha1.Pilot{} + p := &Pilot{ + Options: &PilotOptions{ + CassandraConfigPath: cassConfigPath, + CassandraRack: expectedRack, + CassandraDC: expectedDC, + }, + } + + err = p.WriteConfig(pilotResource) + require.NoError(t, err) + + cfg, err := config.NewFromProperties(cassConfigPath + "/cassandra-rackdc.properties") + require.NoError(t, err) + assert.Equal(t, expectedRack, cfg.Get("rack")) + assert.Equal(t, expectedDC, cfg.Get("dc")) + }, + ) +} diff --git a/pkg/pilot/cassandra/v3/testdata/cassandra-rackdc.properties b/pkg/pilot/cassandra/v3/testdata/cassandra-rackdc.properties new file mode 100644 index 000000000..2ea604384 --- /dev/null +++ b/pkg/pilot/cassandra/v3/testdata/cassandra-rackdc.properties @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# These properties are used with GossipingPropertyFileSnitch and will +# indicate the rack and dc for this node +dc=dc1 +rack=rack1 + +# Add a suffix to a datacenter name. Used by the Ec2Snitch and Ec2MultiRegionSnitch +# to append a string to the EC2 region name. +#dc_suffix= + +# Uncomment the following line to make this snitch prefer the internal ip when possible, as the Ec2MultiRegionSnitch does. +# prefer_local=true diff --git a/pkg/pilot/cassandra/v3/testdata/cassandra.yaml b/pkg/pilot/cassandra/v3/testdata/cassandra.yaml new file mode 100644 index 000000000..68237459a --- /dev/null +++ b/pkg/pilot/cassandra/v3/testdata/cassandra.yaml @@ -0,0 +1,1237 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# Triggers automatic allocation of num_tokens tokens for this node. 
The allocation +# algorithm attempts to choose tokens in a way that optimizes replicated load over +# the nodes in the datacenter for the replication strategy used by the specified +# keyspace. +# +# The load assigned to each node will be close to proportional to its number of +# vnodes. +# +# Only supported with the Murmur3Partitioner. +# allocate_tokens_for_keyspace: KEYSPACE + +# initial_token allows you to specify tokens manually. While you can use it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally +hinted_handoff_enabled: true + +# When hinted_handoff_enabled is true, a black list of data centers that will not +# perform hinted handoff +# hinted_handoff_disabled_datacenters: +# - DC1 +# - DC2 + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours + +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 + +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Directory where Cassandra should store hints. +# If not set, the default directory is $CASSANDRA_HOME/data/hints. +# hints_directory: /var/lib/cassandra/hints + +# How often hints should be flushed from the internal buffers to disk. +# Will *not* trigger fsync. +hints_flush_period_in_ms: 10000 + +# Maximum size for a single hints file, in megabytes. +max_hints_file_size_in_mb: 128 + +# Compression to apply to the hint files. If omitted, hints files +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#hints_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.roles table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. 
+# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: AllowAllAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching granted roles can be an expensive +# operation depending on the role manager, CassandraRoleManager is one example) +# Granted roles are cached for authenticated sessions in AuthenticatedUser and +# after the period specified here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable caching entirely. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If roles_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 2000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# Validity period for credentials cache. This cache is tightly coupled to +# the provided PasswordAuthenticator implementation of IAuthenticator. If +# another IAuthenticator implementation is configured, this cache will not +# be automatically used and so the following settings will have no effect. +# Please note, credentials are cached in their encrypted form, so while +# activating this cache may reduce the number of queries made to the +# underlying table, it may not bring a significant reduction in the +# latency of individual authentication attempts. +# Defaults to 2000, set to 0 to disable credentials caching. +credentials_validity_in_ms: 2000 + +# Refresh interval for credentials cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If credentials_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as credentials_validity_in_ms. 
+# credentials_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +# data_file_directories: +# - /var/lib/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. +# commitlog_directory: /var/lib/cassandra/commitlog + +# Enable / disable CDC functionality on a per-node basis. This modifies the logic used +# for write path allocation rejection (standard: never reject. cdc: reject Mutation +# containing a CDC-enabled table if at space limit in cdc_raw_directory). +cdc_enabled: false + +# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the +# segment contains mutations for a CDC-enabled table. This should be placed on a +# separate spindle than the data directories. If not set, the default directory is +# $CASSANDRA_HOME/data/cdc_raw. +# cdc_raw_directory: /var/lib/cassandra/cdc_raw + +# Policy for data disk failures: +# +# die +# shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. +# +# stop_paranoid +# shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# +# stop +# shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# +# best_effort +# stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# +# ignore +# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Policy for commit disk failures: +# +# die +# shut down gossip and Thrift and kill the JVM, so the node can be replaced. +# +# stop +# shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# +# stop_commit +# shutdown the commit log, letting writes collect but +# continuing to service reads, as in pre-2.0.5 Cassandra +# +# ignore +# ignore fatal errors and let the batches fail +commit_failure_policy: stop + +# Maximum size of the native protocol prepared statement cache +# +# Valid values are either "auto" (omitting the value) or a value greater 0. +# +# Note that specifying a too large value will result in long running GCs and possbily +# out-of-memory errors. Keep the value at a small fraction of the heap. +# +# If you constantly see "prepared statements discarded in the last minute because +# cache limit reached" messages, the first step is to investigate the root cause +# of these messages and check whether prepared statements are used correctly - +# i.e. 
use bind markers for variable parts. +# +# Do only change the default value, if you really have more prepared statements than +# fit in the cache. In most cases it is not neccessary to change this value. +# Constantly re-preparing statements is a performance penalty. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +prepared_statements_cache_size_mb: + +# Maximum size of the Thrift prepared statement cache +# +# If you do not use Thrift at all, it is safe to leave this value at "auto". +# +# See description of 'prepared_statements_cache_size_mb' above for more information. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +thrift_prepared_statements_cache_size_mb: + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Row cache implementation class name. Available implementations: +# +# org.apache.cassandra.cache.OHCProvider +# Fully off-heap row cache implementation (default). +# +# org.apache.cassandra.cache.SerializingCacheProvider +# This is the row cache implementation availabile +# in previous releases of Cassandra. +# row_cache_class_name: org.apache.cassandra.cache.OHCProvider + +# Maximum size of the row cache in memory. +# Please note that OHC cache implementation requires some additional off-heap memory to manage +# the map structures and some in-flight memory during operations before/after cache entries can be +# accounted against the cache capacity. This overhead is usually small compared to the whole capacity. +# Do not specify more memory that the system can afford in the worst usual situation and leave some +# headroom for OS block level cache. Do never allow your system to swap. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should save the row cache. +# Caches are saved to saved_caches_directory as specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save. 
+# Specify 0 (which is the default), meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# Maximum size of the counter cache in memory. +# +# Counter cache helps to reduce counter locks' contention for hot counter cells. +# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before +# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration +# of the lock hold, helping with hot counter cell updates, but will not allow skipping +# the read entirely. Only the local (clock, count) tuple of a counter cell is kept +# in memory, not the whole counter, so it's relatively cheap. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. +# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. +counter_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the counter cache (keys only). Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Default is 7200 or 2 hours. +counter_cache_save_period: 7200 + +# Number of keys from the counter cache to save +# Disabled by default, meaning all keys are going to be saved +# counter_cache_keys_to_save: 100 + +# saved caches +# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. +# saved_caches_directory: /var/lib/cassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +# Max mutation size is also configurable via max_mutation_size_in_kb setting in +# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. +# This should be positive and less than 2048. +# +# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must +# be set to at least twice the size of max_mutation_size_in_kb / 1024 +# +commitlog_segment_size_in_mb: 32 + +# Compression to apply to the commit log. If omitted, the commit log +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +# commitlog_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. 
+seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. Same applies to +# "concurrent_counter_writes", since counter writes read the current +# values before incrementing and writing them back. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 +concurrent_counter_writes: 32 + +# For materialized view writes, as there is a read involved, so this should +# be limited by the less of concurrent reads or concurrent writes. +concurrent_materialized_view_writes: 32 + +# Maximum memory to use for sstable chunk cache and buffer pooling. +# 32MB of this are reserved for pooling buffers, the rest is used as an +# cache that holds uncompressed sstable chunks. +# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, +# so is in addition to the memory allocated for heap. The cache also has on-heap +# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size +# if the default 64k chunk size is used). +# Memory is only allocated when needed. +# file_cache_size_in_mb: 512 + +# Flag indicating whether to allocate on or off heap when the sstable buffer +# pool is exhausted, that is when it has exceeded the maximum memory +# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. + +# buffer_pool_use_heap_if_exhausted: true + +# The strategy for optimizing disk read +# Possible values are: +# ssd (for solid state disks, the default) +# spinning (for spinning disks) +# disk_optimization_strategy: ssd + +# Total permitted memory to use for memtables. Cassandra will stop +# accepting writes when the limit is exceeded until a flush completes, +# and will trigger a flush based on memtable_cleanup_threshold +# If omitted, Cassandra will set both to 1/4 the size of the heap. +# memtable_heap_space_in_mb: 2048 +# memtable_offheap_space_in_mb: 2048 + +# memtable_cleanup_threshold is deprecated. The default calculation +# is the only reasonable choice. See the comments on memtable_flush_writers +# for more information. +# +# Ratio of occupied non-flushing memtable size to total permitted size +# that will trigger a flush of the largest memtable. Larger mct will +# mean larger flushes and hence less compaction, but also less concurrent +# flush activity which can make it difficult to keep your disks fed +# under heavy write load. +# +# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) +# memtable_cleanup_threshold: 0.11 + +# Specify the way Cassandra allocates and manages memtable memory. 
+# Options are: +# +# heap_buffers +# on heap nio buffers +# +# offheap_buffers +# off heap (direct) nio buffers +# +# offheap_objects +# off heap objects +memtable_allocation_type: heap_buffers + +# Total space to use for commit logs on disk. +# +# If space gets above this value, Cassandra will flush every dirty CF +# in the oldest segment and remove it. So a small total commitlog space +# will tend to cause more flush activity on less-active columnfamilies. +# +# The default value is the smaller of 8192, and 1/4 of the total space +# of the commitlog volume. +# +# commitlog_total_space_in_mb: 8192 + +# This sets the number of memtable flush writer threads per disk +# as well as the total number of memtables that can be flushed concurrently. +# These are generally a combination of compute and IO bound. +# +# Memtable flushing is more CPU efficient than memtable ingest and a single thread +# can keep up with the ingest rate of a whole server on a single fast disk +# until it temporarily becomes IO bound under contention typically with compaction. +# At that point you need multiple flush threads. At some point in the future +# it may become CPU bound all the time. +# +# You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation +# metric which should be 0, but will be non-zero if threads are blocked waiting on flushing +# to free memory. +# +# memtable_flush_writers defaults to two for a single data directory. +# This means that two memtables can be flushed concurrently to the single data directory. +# If you have multiple data directories the default is one memtable flushing at a time +# but the flush will use a thread per data directory so you will get two or more writers. +# +# Two is generally enough to flush on a fast disk [array] mounted as a single data directory. +# Adding more flush writers will result in smaller more frequent flushes that introduce more +# compaction overhead. +# +# There is a direct tradeoff between number of memtables that can be flushed concurrently +# and flush size and frequency. More is not better you just need enough flush writers +# to never stall waiting for flushing to free memory. +# +#memtable_flush_writers: 2 + +# Total space to use for change-data-capture logs on disk. +# +# If space gets above this value, Cassandra will throw WriteTimeoutException +# on Mutations including tables with CDC enabled. A CDCCompactor is responsible +# for parsing the raw CDC logs and deleting them when parsing is completed. +# +# The default value is the min of 4096 mb and 1/8th of the total space +# of the drive where cdc_raw_directory resides. +# cdc_total_space_in_mb: 4096 + +# When we hit our cdc_raw limit and the CDCCompactor is either running behind +# or experiencing backpressure, we check at the following interval to see if any +# new space for cdc-tracked tables has been made available. Default to 250ms +# cdc_free_space_check_interval_ms: 250 + +# A fixed memory pool size in MB for for SSTable index summaries. If left +# empty, this will default to 5% of the heap size. If the memory usage of +# all index summaries exceeds this limit, SSTables with low read rates will +# shrink their index summaries in order to meet this limit. However, this +# is a best-effort process. In extreme conditions Cassandra may need to use +# more than this amount of memory. +index_summary_capacity_in_mb: + +# How frequently index summaries should be resampled. 
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# Set listen_address OR listen_interface, not both.
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting listen_address to 0.0.0.0 is always wrong.
+#
+listen_address: localhost
+
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# listen_interface: eth0
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# listen_interface_prefer_ipv6: false
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# When using multiple physical network interfaces, set this
+# to true to listen on broadcast_address in addition to
+# the listen_address, allowing nodes to communicate on both
+# interfaces.
+# Ignore this property if the network configuration automatically
+# routes between the public and private networks such as EC2.
+# listen_on_broadcast_address: false
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: 9042 +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +# native_transport_port_ssl: 9142 +# The maximum threads for handling requests when the native transport is used. +# This is similar to rpc_max_threads though the default differs slightly (and +# there is no native_transport_min_threads, idle threads will always be stopped +# after 30 seconds). +# native_transport_max_threads: 128 +# +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. If you're changing this parameter, +# you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. +# native_transport_max_frame_size_in_mb: 256 + +# The maximum number of concurrent client connections. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections: -1 + +# The maximum number of concurrent client connections per source ip. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections_per_ip: -1 + +# Whether to start the thrift rpc server. +start_rpc: false + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +rpc_address: localhost + +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# rpc_interface: eth1 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# RPC address to broadcast to drivers and other Cassandra nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +# broadcast_rpc_address: 1.2.3.4 + +# enable or disable keepalive on rpc/native connections +rpc_keepalive: true + +# Cassandra provides two out-of-the-box options for the RPC Server: +# +# sync +# One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). 
+#
+# hsha
+#   Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#   asynchronously using a small number of threads that does not vary with the number
+#   of thrift clients (and thus scales well to many clients). The rpc requests are still
+#   synchronous (one thread per active request). If hsha is selected then it is essential
+#   that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified
+# class name of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_threads to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See also:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and 'man tcp'
+# internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
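+
+# Example (illustrative): with auto_snapshot: true, a TRUNCATE or DROP first
+# flushes and snapshots the data, so disk space is reclaimed only once the
+# snapshot is cleared, e.g. with "nodetool clearsnapshot".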
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+#   and looking up rows within the partition by collation column
+#   is faster
+# - but, Cassandra will keep the collation index in memory for hot
+#   rows (as part of the key cache), so a larger granularity means
+#   you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine, and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot.
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
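+# Example (illustrative): to cap streaming at 400 Mbps (50 MB/s), uncomment
+# the line above and set it to 400.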
+
+# Throttles all streaming file transfer between the datacenters;
+# this setting allows users to throttle inter-dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# How long before a node logs slow queries. Select queries that take longer than
+# this timeout to execute will generate an aggregated log message, so that slow queries
+# can be identified. Set this value to zero to disable slow query logging.
+slow_query_log_timeout_in_ms: 500
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Set keep-alive period for streaming
+# This node will send a keep-alive message periodically with this period.
+# If the node does not receive a keep-alive message from the peer for
+# 2 keep-alive cycles the stream session times out and fails.
+# Default value is 300s (5 minutes), which means a stalled stream
+# times out in 10 minutes by default
+# streaming_keep_alive_period_in_secs: 300
+
+# phi value that must be reached for a host to be marked down.
+# Most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+#
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks." Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
+# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
+# This means that if you start with the default SimpleSnitch, which
+# locates every node on "rack1" in "datacenter1", your only options
+# if you need to add another datacenter are GossipingPropertyFileSnitch
+# (and the older PFS).
From there, if you want to migrate to an +# incompatible snitch like Ec2Snitch you can do it by adding new nodes +# under Ec2Snitch (which will locate them in a new "datacenter") and +# decommissioning the old ones. +# +# Out of the box, Cassandra provides: +# +# SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# +# GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# +# PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# +# Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# +# Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. 
The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# +# NoScheduler +# Has no options +# +# RoundRobin +# throttle_limit +# The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# default_weight +# default_weight is optional and allows for +# overriding the default which is 1. +# weights +# Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# JVM defaults for supported SSL socket protocols and cipher suites can +# be replaced using custom encryption options. This is not recommended +# unless you have policies in place that dictate certain settings, or +# need to disable vulnerable ciphers or protocols in case the JVM cannot +# be updated. +# FIPS compliant settings can be configured at JVM level and should not +# involve changing encryption settings here: +# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html +# *NOTE* No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + # require_endpoint_verification: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + # If enabled and optional is set to true encrypted and unencrypted connections are handled. 
+    optional: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # require_client_auth: false
+    # Set truststore and truststore_password if require_client_auth is true
+    # truststore: conf/.truststore
+    # truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# Can be:
+#
+# all
+#   all traffic is compressed
+#
+# dc
+#   traffic between different datacenters is compressed
+#
+# none
+#   nothing is compressed.
+internode_compression: dc
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
+
+# TTL for different trace types used during logging of the repair process.
+tracetype_query_ttl: 86400
+tracetype_repair_ttl: 604800
+
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+# This threshold can be adjusted to minimize logging if necessary
+# gc_log_threshold_in_ms: 200
+
+# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
+# INFO level
+# UDFs (user defined functions) are disabled by default.
+# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
+enable_user_defined_functions: false
+
+# Enables scripted UDFs (JavaScript UDFs).
+# Java UDFs are always enabled if enable_user_defined_functions is true.
+# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
+# This option has no effect if enable_user_defined_functions is false.
+enable_scripted_user_defined_functions: false
+
+# Enables materialized view creation on this node.
+# Materialized views are considered experimental and are not recommended for production use.
+enable_materialized_views: true
+
+# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput; however,
+# some virtualized environments may see a negative performance impact from changing this setting
+# below their system default. The sysinternals 'clockres' tool can confirm your system's default
+# setting.
+windows_timer_interval: 1
+
+
+# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
+# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
+# the "key_alias" is the only key that will be used for encrypt operations; previously used keys
+# can still (and should!) be in the keystore and will be used on decrypt operations
+# (to handle the case of key rotation).
+#
+# It is strongly recommended to download and install Java Cryptography Extension (JCE)
+# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
+# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
+#
+# Currently, only the following file types are supported for transparent data encryption, although
+# more are coming in future cassandra releases: commitlog, hints
+transparent_data_encryption_options:
+    enabled: false
+    chunk_length_kb: 64
+    cipher: AES/CBC/PKCS5Padding
+    key_alias: testing:1
+    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+    # iv_length: 16
+    key_provider:
+      - class_name: org.apache.cassandra.security.JKSKeyProvider
+        parameters:
+          - keystore: conf/.keystore
+            keystore_password: cassandra
+            store_type: JCEKS
+            key_password: cassandra
+
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Log WARN on any batches not of type LOGGED that span more partitions than this limit
+unlogged_batch_across_partitions_warn_threshold: 10
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# Adjust the threshold based on your application throughput requirement
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+gc_warn_threshold_in_ms: 1000
+
+# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
+# early. Any value size larger than this threshold will result in marking an SSTable
+# as corrupted. This should be positive and less than 2048.
+# max_value_size_in_mb: 256
+
+# Back-pressure settings #
+# If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation
+# sent to replicas, with the aim of reducing pressure on overloaded replicas.
+back_pressure_enabled: false
+# The back-pressure strategy applied.
+# The default implementation, RateBasedBackPressure, takes three arguments:
+# high ratio, factor, and flow type, and uses the ratio between incoming mutation responses
+# and outgoing mutation requests.
+# If below high ratio, outgoing mutations are rate limited according to the incoming rate
+# decreased by the given factor; if above high ratio, the rate limiting is increased by the
+# given factor. This factor is usually best configured between 1 and 10; use larger values
+# for a faster recovery at the expense of potentially more dropped mutations.
+# The rate limiting is applied according to the flow type: if FAST, it's rate limited at the
+# speed of the fastest replica; if SLOW, at the speed of the slowest one.
+# New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and
+# provide a public constructor accepting a Map<String, Object>.
+back_pressure_strategy:
+    - class_name: org.apache.cassandra.net.RateBasedBackPressure
+      parameters:
+        - high_ratio: 0.90
+          factor: 5
+          flow: FAST
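+
+# Example (illustrative): with the defaults above (high_ratio: 0.90, factor: 5,
+# flow: FAST), once replicas answer fewer than 90% of outgoing mutations, the
+# coordinator rate limits new mutations at the speed of the fastest replica,
+# raising the limit again by the factor of 5 as the ratio recovers.
+
+# Coalescing Strategies #
+# Coalescing multiple messages turns out to significantly boost message processing throughput (think doubling or more).
+# On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in
+# virtualized environments, the point at which an application can be bound by network packet processing can be
+# surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal
+# doesn't benefit from coalescing messages; it's that the number of packets a bare metal network interface can process
+# is sufficient for many applications such that no load starvation is experienced even without coalescing.
+# There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages
+# per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one
+# trip to read from a socket, and all the task submission work can be done at the same time reducing context switching
+# and increasing cache friendliness of network message processing.
+# See CASSANDRA-8692 for details.
+
+# Strategy to use for coalescing messages in OutboundTcpConnection.
+# Can be fixed, movingaverage, timehorizon, disabled (default).
+# You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.
+# otc_coalescing_strategy: DISABLED
+
+# How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first
+# message is received before it will be sent with any accompanying messages. For moving average this is the
+# maximum amount of time that will be waited as well as the interval at which messages must arrive on average
+# for coalescing to be enabled.
+# otc_coalescing_window_us: 200
+
+# Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.
+# otc_coalescing_enough_coalesced_messages: 8
+
+# How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection.
+# Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory
+# taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value
+# will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU
+# time and queue contention while iterating the backlog of messages.
+# An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.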
+# +# otc_backlog_expiration_interval_ms: 200 diff --git a/pkg/pilot/genericpilot/genericpilot.go b/pkg/pilot/genericpilot/genericpilot.go index 61662944b..3462572ad 100644 --- a/pkg/pilot/genericpilot/genericpilot.go +++ b/pkg/pilot/genericpilot/genericpilot.go @@ -111,10 +111,12 @@ func (g *GenericPilot) runController(stopCh <-chan struct{}) <-chan error { func (g *GenericPilot) runElector(stopCh <-chan struct{}) <-chan error { out := make(chan error, 1) - go func() { - defer close(out) - out <- g.elector.Run() - }() + if g.Options.LeaderElect { + go func() { + defer close(out) + out <- g.elector.Run() + }() + } return out } diff --git a/pkg/pilot/genericpilot/options.go b/pkg/pilot/genericpilot/options.go index 17e61edde..d6f456de1 100644 --- a/pkg/pilot/genericpilot/options.go +++ b/pkg/pilot/genericpilot/options.go @@ -44,6 +44,7 @@ type Options struct { PilotName string // PilotNamespace is the namespace the corresponding Pilot exists within PilotNamespace string + LeaderElect bool LeaderElectionConfigMap string // CmdFunc returns an *exec.Cmd for a given Pilot resource for the pilot @@ -179,5 +180,7 @@ func (o *Options) Pilot() (*GenericPilot, error) { func (o *Options) AddFlags(flags *pflag.FlagSet) { flags.StringVar(&o.PilotName, "pilot-name", "", "The name of this Pilot. If not specified, an auto-detected name will be used.") flags.StringVar(&o.PilotNamespace, "pilot-namespace", "", "The namespace the corresponding Pilot resource for this Pilot exists within.") + flags.BoolVar(&o.LeaderElect, "leader-elect", true, ""+ + "If true, navigator will perform leader election between pilots.") flags.StringVar(&o.LeaderElectionConfigMap, "leader-election-config-map", "", "The name of the ConfigMap to use for leader election") } diff --git a/test/integration/pilot_test.go b/test/integration/pilot_test.go new file mode 100644 index 000000000..5f9474958 --- /dev/null +++ b/test/integration/pilot_test.go @@ -0,0 +1,200 @@ +package integration_test + +import ( + "os" + "os/exec" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/onsi/gomega/gbytes" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "sigs.k8s.io/testing_frameworks/integration" + + "github.com/jetstack/navigator/internal/test/util/testfs" +) + +// MakeKubeConfig creates a kube/config file which is suitable for communicating +// with the API server started by ``integration.ControlPlane``. +func MakeKubeConfig(f *integration.ControlPlane, path string) error { + ctl := f.KubeCtl() + ctl.Opts = []string{"--kubeconfig", path} + + _, _, err := ctl.Run( + "config", + "set-credentials", + "user1", + ) + if err != nil { + return errors.Wrap(err, "unable to create user") + } + + _, _, err = ctl.Run( + "config", + "set-cluster", + "integration1", + "--server", + f.APIURL().String(), + "--certificate-authority", + filepath.Join(f.APIServer.CertDir, "apiserver.crt"), + ) + if err != nil { + return errors.Wrap(err, "unable to create cluster") + } + + _, _, err = ctl.Run( + "config", + "set-context", + "default", + "--cluster", "integration1", + "--user", "user1", + ) + if err != nil { + return errors.Wrap(err, "unable to create context") + } + + _, _, err = ctl.Run( + "config", + "use-context", + "default", + ) + if err != nil { + return errors.Wrap(err, "unable to use context") + } + return nil +} + +func TestPilotCassandra(t *testing.T) { + // Start the API server with CustomResourceSubresources feature enabled. 
+	// The Navigator pilot calls UpdateStatus on Pilot resources, for example.
+	cp := &integration.ControlPlane{
+		APIServer: &integration.APIServer{
+			Args: []string{
+				"--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}",
+				"--cert-dir={{ .CertDir }}",
+				"--insecure-port={{ if .URL }}{{ .URL.Port }}{{ end }}",
+				"--insecure-bind-address={{ if .URL }}{{ .URL.Hostname }}{{ end }}",
+				"--secure-port=0",
+				"--feature-gates=CustomResourceSubresources=true",
+				"-v=4", "--alsologtostderr",
+			},
+			// Out: os.Stdout,
+			// Err: os.Stderr,
+		},
+	}
+	err := cp.Start()
+	require.NoError(t, err)
+	defer func() {
+		err := cp.Stop()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+	cli := cp.KubeCtl()
+
+	// Create a CRD for the Navigator Pilot resource type.
+	// This allows us to test the Pilot against the Kubernetes API server,
+	// without having to start the Navigator API server and configure aggregation etc.
+	crdPath, err := filepath.Abs("testdata/pilot_crd.yaml")
+	require.NoError(t, err)
+	stdout, stderr, err := cli.Run(
+		"apply",
+		"--filename",
+		crdPath,
+	)
+	t.Log("stdout", stdout)
+	t.Log("stderr", stderr)
+	require.NoError(t, err)
+
+	// Create a Pilot resource for our Pilot process to watch and update.
+	stdout, stderr, err = cli.Run(
+		"create",
+		"namespace",
+		"ns1",
+	)
+	t.Log("stdout", stdout)
+	t.Log("stderr", stderr)
+	require.NoError(t, err)
+	pilotResourcePath, err := filepath.Abs("testdata/pilot.yaml")
+	require.NoError(t, err)
+	stdout, stderr, err = cli.Run(
+		"apply",
+		"--filename",
+		pilotResourcePath,
+	)
+	t.Log("stdout", stdout)
+	t.Log("stderr", stderr)
+	require.NoError(t, err)
+
+	// Temporary configuration directories.
+	tfs := testfs.New(t)
+	kubeConfig := tfs.TempPath("kube.config")
+	cassConfig := tfs.TempDir("etc_cassandra")
+	pilotConfigDir := tfs.TempDir("etc/pilot")
+	// A fake cassandra binary for the Pilot to execute.
+	cassPath, err := filepath.Abs("testdata/fake_cassandra")
+	require.NoError(t, err)
+	err = MakeKubeConfig(cp, kubeConfig)
+	require.NoError(t, err)
+
+	pilotPath, pilotPathFound := os.LookupEnv("TEST_ASSET_NAVIGATOR_PILOT_CASSANDRA")
+	if !pilotPathFound {
+		t.Fatal(
+			"Please set environment variable TEST_ASSET_NAVIGATOR_PILOT_CASSANDRA " +
+				"with the path to the navigator-pilot-cassandra binary under test.")
+	}
+
+	expectedClusterName := "cluster1"
+	cmd := exec.Command(
+		pilotPath,
+		"--pilot-name", "pilot1",
+		"--pilot-namespace", "ns1",
+		"--kubeconfig", kubeConfig,
+		"--v=4", "--alsologtostderr",
+		"--leader-elect=false",
+		"--cassandra-cluster-name", expectedClusterName,
+		"--cassandra-config-path", cassConfig,
+		"--cassandra-path", cassPath,
+		"--cassandra-rack", "rack-for-test",
+		"--cassandra-dc", "dc-for-test",
+		"--config-dir", pilotConfigDir,
+	)
+	startDetectStream := gbytes.NewBuffer()
+	// The fake Cassandra script echoes "FAKE CASSANDRA" in a loop; the
+	// ready channel receives once that string is detected on stdout.
+	ready := startDetectStream.Detect("FAKE CASSANDRA")
+	cmd.Stdout = startDetectStream
+	cmd.Stderr = os.Stderr
+	err = cmd.Start()
+	require.NoError(t, err)
+	defer func() {
+		// XXX: Pilot doesn't respond to SIGKILL
+		err := cmd.Process.Signal(os.Interrupt)
+		require.NoError(t, err)
+		err = cmd.Wait()
+		if exiterr, ok := err.(*exec.ExitError); ok {
+			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+				if status.ExitStatus() == 255 {
+					// XXX Pilot should return 0 if it exits cleanly after a signal.
+					return
+				}
+			}
+		}
+		require.NoError(t, err)
+	}()
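+	// Wait for the pilot to report readiness, or fail after a timeout
+	// (assumed generous enough for the pilot to fork the fake Cassandra).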
+	select {
+	case <-ready:
+	case <-time.After(time.Second * 5):
+		t.Fatal("timeout waiting for process to start")
+	}
+	// The pilot has executed the Cassandra sub-process, so it should already
+	// have written the configuration files as part of the pre-start hook
+	// mechanism.
+	// We did not provide an existing cassandra.yaml or cassandra-rackdc.properties,
+	// so we expect those files to now exist.
+	assert.FileExists(t, cassConfig+"/cassandra.yaml")
+	assert.FileExists(t, cassConfig+"/cassandra-rackdc.properties")
+}
diff --git a/test/integration/testdata/cassandra.yaml b/test/integration/testdata/cassandra.yaml
new file mode 100644
index 000000000..f98310056
--- /dev/null
+++ b/test/integration/testdata/cassandra.yaml
@@ -0,0 +1,2 @@
+foo:
+  bar: org.apache.cassandra.locator.SimpleSeedProvider
diff --git a/test/integration/testdata/fake_cassandra b/test/integration/testdata/fake_cassandra
new file mode 100755
index 000000000..4b51605b8
--- /dev/null
+++ b/test/integration/testdata/fake_cassandra
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+
+while true; do
+  echo "FAKE CASSANDRA"
+  sleep 1
+done
diff --git a/test/integration/testdata/pilot.yaml b/test/integration/testdata/pilot.yaml
new file mode 100644
index 000000000..ed07fecdb
--- /dev/null
+++ b/test/integration/testdata/pilot.yaml
@@ -0,0 +1,11 @@
+apiVersion: navigator.jetstack.io/v1alpha1
+kind: Pilot
+metadata:
+  name: pilot1
+  namespace: ns1
+  ownerReferences:
+  - apiVersion: navigator.jetstack.io/v1alpha1
+    kind: CassandraCluster
+    name: cluster1
+    uid: 23c88696-cf7b-11e7-9ec9-0a580a200927
+    controller: true
diff --git a/test/integration/testdata/pilot_crd.yaml b/test/integration/testdata/pilot_crd.yaml
new file mode 100644
index 000000000..50e8ab31e
--- /dev/null
+++ b/test/integration/testdata/pilot_crd.yaml
@@ -0,0 +1,17 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: pilots.navigator.jetstack.io
+spec:
+  group: navigator.jetstack.io
+  version: v1alpha1
+  scope: Namespaced
+  names:
+    plural: pilots
+    singular: pilot
+    kind: Pilot
+    # categories is a list of grouped resources the custom resource belongs to.
+    categories:
+    - all
+  subresources:
+    status: {}
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 000000000..ba49e3c23
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,5 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
diff --git a/vendor/github.com/fsnotify/fsnotify/.github/ISSUE_TEMPLATE.md b/vendor/github.com/fsnotify/fsnotify/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..4ad1aed8f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,11 @@
+Before reporting an issue, please ensure you are using the latest release of fsnotify.
+
+### Which operating system (GOOS) and version are you using?
+
+Linux: lsb_release -a
+macOS: sw_vers
+Windows: systeminfo | findstr /B /C:OS
+
+### Please describe the issue that occurred.
+
+### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.
diff --git a/vendor/github.com/fsnotify/fsnotify/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/fsnotify/fsnotify/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..64ddf7cef
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,8 @@
+#### What does this pull request do?
+
+
+#### Where should the reviewer start?
+
+
+#### How should this be manually tested?
+
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 000000000..4cd0cbaf4
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
new file mode 100644
index 000000000..981d1bb81
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: go
+
+go:
+  - 1.8.x
+  - 1.9.x
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+
+before_script:
+  - go get -u github.com/golang/lint/golint
+
+script:
+  - go test -v --race ./...
+
+after_script:
+  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+  - test -z "$(golint ./... | tee /dev/stderr)"
+  - go vet ./...
+
+os:
+  - linux
+  - osx
+
+notifications:
+  email: false
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 000000000..5ab5d41c5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+#    Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+ +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 000000000..be4d7ea2c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 
2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. 
+ * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. + +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] 
kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 000000000..828a60b24 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. 
+* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 000000000..f21e54080 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 000000000..399320741 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,79 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +|Adapter |OS |Status | +|----------|----------|----------| +|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| +|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| +|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| +|fanotify |Linux 2.6.37+ | | +|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| +|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. 
The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/example_test.go b/vendor/github.com/fsnotify/fsnotify/example_test.go new file mode 100644 index 000000000..700502cb3 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/example_test.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
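+
+// The example in this file shows the intended consumption pattern: a single
+// goroutine owns the select over Events and Errors, so the watcher's internal
+// reader goroutine is never blocked on an unread channel.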
+
+// +build !plan9
+
+package fsnotify_test
+
+import (
+	"log"
+
+	"github.com/fsnotify/fsnotify"
+)
+
+func ExampleNewWatcher() {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer watcher.Close()
+
+	done := make(chan bool)
+	go func() {
+		for {
+			select {
+			case event := <-watcher.Events:
+				log.Println("event:", event)
+				if event.Op&fsnotify.Write == fsnotify.Write {
+					log.Println("modified file:", event.Name)
+				}
+			case err := <-watcher.Errors:
+				log.Println("error:", err)
+			}
+		}
+	}()
+
+	err = watcher.Add("/tmp/foo")
+	if err != nil {
+		log.Fatal(err)
+	}
+	<-done
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 000000000..ced39cb88
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+	"errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 000000000..190bf0de5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+	Name string // Relative path to the file or directory.
+	Op   Op     // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+	Create Op = 1 << iota
+	Write
+	Remove
+	Rename
+	Chmod
+)
+
+func (op Op) String() string {
+	// Use a buffer for efficient string concatenation
+	var buffer bytes.Buffer
+
+	if op&Create == Create {
+		buffer.WriteString("|CREATE")
+	}
+	if op&Remove == Remove {
+		buffer.WriteString("|REMOVE")
+	}
+	if op&Write == Write {
+		buffer.WriteString("|WRITE")
+	}
+	if op&Rename == Rename {
+		buffer.WriteString("|RENAME")
+	}
+	if op&Chmod == Chmod {
+		buffer.WriteString("|CHMOD")
+	}
+	if buffer.Len() == 0 {
+		return ""
+	}
+	return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
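+//
+// Op is a bit set: a single event may carry several operations, so callers
+// should test for an operation with a mask rather than ==, for example:
+//
+//	if event.Op&fsnotify.Write == fsnotify.Write {
+//		log.Println("modified:", event.Name)
+//	}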
+func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go new file mode 100644 index 000000000..f9771d9df --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go @@ -0,0 +1,70 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +package fsnotify + +import ( + "os" + "testing" + "time" +) + +func TestEventStringWithValue(t *testing.T) { + for opMask, expectedString := range map[Op]string{ + Chmod | Create: `"/usr/someFile": CREATE|CHMOD`, + Rename: `"/usr/someFile": RENAME`, + Remove: `"/usr/someFile": REMOVE`, + Write | Chmod: `"/usr/someFile": WRITE|CHMOD`, + } { + event := Event{Name: "/usr/someFile", Op: opMask} + if event.String() != expectedString { + t.Fatalf("Expected %s, got: %v", expectedString, event.String()) + } + + } +} + +func TestEventOpStringWithValue(t *testing.T) { + expectedOpString := "WRITE|CHMOD" + event := Event{Name: "someFile", Op: Write | Chmod} + if event.Op.String() != expectedOpString { + t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String()) + } +} + +func TestEventOpStringWithNoValue(t *testing.T) { + expectedOpString := "" + event := Event{Name: "testFile", Op: 0} + if event.Op.String() != expectedOpString { + t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String()) + } +} + +// TestWatcherClose tests that the goroutine started by creating the watcher can be +// signalled to return at any time, even if there is no goroutine listening on the events +// or errors channels. +func TestWatcherClose(t *testing.T) { + t.Parallel() + + name := tempMkFile(t, "") + w := newWatcher(t) + err := w.Add(name) + if err != nil { + t.Fatal(err) + } + + err = os.Remove(name) + if err != nil { + t.Fatal(err) + } + // Allow the watcher to receive the event. + time.Sleep(time.Millisecond * 100) + + err = w.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 000000000..d9fd1b88a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
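+// On Linux it allocates an inotify descriptor plus an epoll-backed fdPoller
+// (see inotify_poller.go), so that Close can wake the reader goroutine even
+// while it is blocked waiting for events.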
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// Clean up our internal state first, whether or not InotifyRmWatch
+	// succeeds below, so that it stays consistent with inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// watches and paths are deleted implicitly and asynchronously by calling
+	// inotify_rm_watch() below: the readEvents() goroutine receives IN_IGNORED.
+	// EINVAL therefore means that the wd is being rm_watch()ed, or its file
+	// removed, by another thread and we have not yet received the IN_IGNORED event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// The only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
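+		// A Remove that races with file deletion can therefore see EINVAL
+		// here even though the watch is already gone.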
+		return errno
+	}
+
+	return nil
+}
+
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// EOF was received. This should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// An error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer.
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill the
+			// "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
+			w.mu.Unlock()
+
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
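+				// The watch name is the directory path; joining it with the
+				// trimmed filename yields the full path reported in the event.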
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel, such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 000000000..cc7db4b22
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+	fd   int    // File descriptor (as returned by the inotify_init() syscall)
+	epfd int    // Epoll file descriptor
+	pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+	poller := new(fdPoller)
+	poller.fd = fd
+	poller.epfd = -1
+	poller.pipe[0] = -1
+	poller.pipe[1] = -1
+	return poller
+}
+
+// Create a new inotify poller.
+// This wraps the given inotify fd with an epoll instance and a wakeup pipe.
+func newFdPoller(fd int) (*fdPoller, error) {
+	var errno error
+	poller := emptyPoller(fd)
+	defer func() {
+		if errno != nil {
+			poller.close()
+		}
+	}()
+	poller.fd = fd
+
+	// Create epoll fd
+	poller.epfd, errno = unix.EpollCreate1(0)
+	if poller.epfd == -1 {
+		return nil, errno
+	}
+	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
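+	// The pipe is non-blocking on both ends: wake() treats a full buffer
+	// (EAGAIN) as "already woken", and clearWake() drains without blocking.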
+	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register inotify fd with epoll
+	event := unix.EpollEvent{
+		Fd:     int32(poller.fd),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register pipe fd with epoll
+	event = unix.EpollEvent{
+		Fd:     int32(poller.pipe[0]),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+	// I don't know whether epoll_wait returns the number of events returned,
+	// or the total number of events ready.
+	// I decided to catch both by making the buffer one larger than the maximum.
+	events := make([]unix.EpollEvent, 7)
+	for {
+		n, errno := unix.EpollWait(poller.epfd, events, -1)
+		if n == -1 {
+			if errno == unix.EINTR {
+				continue
+			}
+			return false, errno
+		}
+		if n == 0 {
+			// If there are no events, try again.
+			continue
+		}
+		if n > 6 {
+			// This should never happen. More events were returned than should be possible.
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					return false, errors.New("Error on the pipe descriptor.")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// Wake up the poller by writing a single byte to the wakeup pipe.
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// clearWake drains any pending wakeup bytes from the read end of the pipe.
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
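+// The inotify fd itself is owned by the Watcher and is closed in readEvents.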
+func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go new file mode 100644 index 000000000..26623efef --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go @@ -0,0 +1,229 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "testing" + "time" + + "golang.org/x/sys/unix" +) + +type testFd [2]int + +func makeTestFd(t *testing.T) testFd { + var tfd testFd + errno := unix.Pipe(tfd[:]) + if errno != nil { + t.Fatalf("Failed to create pipe: %v", errno) + } + return tfd +} + +func (tfd testFd) fd() int { + return tfd[0] +} + +func (tfd testFd) closeWrite(t *testing.T) { + errno := unix.Close(tfd[1]) + if errno != nil { + t.Fatalf("Failed to close write end of pipe: %v", errno) + } +} + +func (tfd testFd) put(t *testing.T) { + buf := make([]byte, 10) + _, errno := unix.Write(tfd[1], buf) + if errno != nil { + t.Fatalf("Failed to write to pipe: %v", errno) + } +} + +func (tfd testFd) get(t *testing.T) { + buf := make([]byte, 10) + _, errno := unix.Read(tfd[0], buf) + if errno != nil { + t.Fatalf("Failed to read from pipe: %v", errno) + } +} + +func (tfd testFd) close() { + unix.Close(tfd[1]) + unix.Close(tfd[0]) +} + +func makePoller(t *testing.T) (testFd, *fdPoller) { + tfd := makeTestFd(t) + poller, err := newFdPoller(tfd.fd()) + if err != nil { + t.Fatalf("Failed to create poller: %v", err) + } + return tfd, poller +} + +func TestPollerWithBadFd(t *testing.T) { + _, err := newFdPoller(-1) + if err != unix.EBADF { + t.Fatalf("Expected EBADF, got: %v", err) + } +} + +func TestPollerWithData(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.put(t) + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + tfd.get(t) +} + +func TestPollerWithWakeup(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if ok { + t.Fatalf("expected poller to return false") + } +} + +func TestPollerWithClose(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.closeWrite(t) + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } +} + +func TestPollerWithWakeupAndData(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.put(t) + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + + // both data and wakeup + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + + // data is still in the buffer, wakeup is cleared + ok, err = poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + + tfd.get(t) + // data is gone, only wakeup now + err = 
poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + ok, err = poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if ok { + t.Fatalf("expected poller to return false") + } +} + +func TestPollerConcurrent(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + oks := make(chan bool) + live := make(chan bool) + defer close(live) + go func() { + defer close(oks) + for { + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + oks <- ok + if !<-live { + return + } + } + }() + + // Try a write + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + tfd.put(t) + if !<-oks { + t.Fatalf("expected true") + } + tfd.get(t) + live <- true + + // Try a wakeup + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + if <-oks { + t.Fatalf("expected false") + } + live <- true + + // Try a close + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + tfd.closeWrite(t) + if !<-oks { + t.Fatalf("expected true") + } + tfd.get(t) +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_test.go b/vendor/github.com/fsnotify/fsnotify/inotify_test.go new file mode 100644 index 000000000..54f3f00eb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_test.go @@ -0,0 +1,449 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +func TestInotifyCloseRightAway(t *testing.T) { + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + // Close immediately; it won't even reach the first unix.Read. + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseSlightlyLater(t *testing.T) { + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + // Wait until readEvents has reached unix.Read, and Close. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + w.Add(testDir) + + // Wait until readEvents has reached unix.Read, and Close. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseAfterRead(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add .") + } + + // Generate an event. + os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING")) + + // Wait for readEvents to read the event, then close the watcher. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. 
+ <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func isWatcherReallyClosed(t *testing.T, w *Watcher) { + select { + case err, ok := <-w.Errors: + if ok { + t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err) + } + default: + t.Fatalf("w.Errors would have blocked; readEvents is still alive!") + } + + select { + case _, ok := <-w.Events: + if ok { + t.Fatalf("w.Events is not closed; readEvents is still alive after closing") + } + default: + t.Fatalf("w.Events would have blocked; readEvents is still alive!") + } +} + +func TestInotifyCloseCreate(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add testDir: %v", err) + } + h, err := os.Create(filepath.Join(testDir, "testfile")) + if err != nil { + t.Fatalf("Failed to create file in testdir: %v", err) + } + h.Close() + select { + case _ = <-w.Events: + case err := <-w.Errors: + t.Fatalf("Error from watcher: %v", err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("Took too long to wait for event") + } + + // At this point, we've received one event, so the goroutine is ready. + // It's also blocking on unix.Read. + // Now we try to swap the file descriptor under its nose. + w.Close() + w, err = NewWatcher() + defer w.Close() + if err != nil { + t.Fatalf("Failed to create second watcher: %v", err) + } + + <-time.After(50 * time.Millisecond) + err = w.Add(testDir) + if err != nil { + t.Fatalf("Error adding testDir again: %v", err) + } +} + +// This test verifies the watcher can keep up with file creations/deletions +// when under load. +func TestInotifyStress(t *testing.T) { + maxNumToCreate := 1000 + + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + testFilePrefix := filepath.Join(testDir, "testfile") + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add testDir: %v", err) + } + + doneChan := make(chan struct{}) + // The buffer ensures that the file generation goroutine is never blocked. + errChan := make(chan error, 2*maxNumToCreate) + + go func() { + for i := 0; i < maxNumToCreate; i++ { + testFile := fmt.Sprintf("%s%d", testFilePrefix, i) + + handle, err := os.Create(testFile) + if err != nil { + errChan <- fmt.Errorf("Create failed: %v", err) + continue + } + + err = handle.Close() + if err != nil { + errChan <- fmt.Errorf("Close failed: %v", err) + continue + } + } + + // If we delete a newly created file too quickly, inotify will skip the + // create event and only send the delete event. 
+ time.Sleep(100 * time.Millisecond) + + for i := 0; i < maxNumToCreate; i++ { + testFile := fmt.Sprintf("%s%d", testFilePrefix, i) + err = os.Remove(testFile) + if err != nil { + errChan <- fmt.Errorf("Remove failed: %v", err) + } + } + + close(doneChan) + }() + + creates := 0 + removes := 0 + + finished := false + after := time.After(10 * time.Second) + for !finished { + select { + case <-after: + t.Fatalf("Not done") + case <-doneChan: + finished = true + case err := <-errChan: + t.Fatalf("Got an error from file creator goroutine: %v", err) + case err := <-w.Errors: + t.Fatalf("Got an error from watcher: %v", err) + case evt := <-w.Events: + if !strings.HasPrefix(evt.Name, testFilePrefix) { + t.Fatalf("Got an event for an unknown file: %s", evt.Name) + } + if evt.Op == Create { + creates++ + } + if evt.Op == Remove { + removes++ + } + } + } + + // Drain remaining events from channels + count := 0 + for count < 10 { + select { + case err := <-errChan: + t.Fatalf("Got an error from file creator goroutine: %v", err) + case err := <-w.Errors: + t.Fatalf("Got an error from watcher: %v", err) + case evt := <-w.Events: + if !strings.HasPrefix(evt.Name, testFilePrefix) { + t.Fatalf("Got an event for an unknown file: %s", evt.Name) + } + if evt.Op == Create { + creates++ + } + if evt.Op == Remove { + removes++ + } + count = 0 + default: + count++ + // Give the watcher chances to fill the channels. + time.Sleep(time.Millisecond) + } + } + + if creates-removes > 1 || creates-removes < -1 { + t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes) + } + if creates < 50 { + t.Fatalf("Expected at least 50 creates, got %d", creates) + } +} + +func TestInotifyRemoveTwice(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + testFile := filepath.Join(testDir, "testfile") + + handle, err := os.Create(testFile) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + handle.Close() + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testFile) + if err != nil { + t.Fatalf("Failed to add testFile: %v", err) + } + + err = w.Remove(testFile) + if err != nil { + t.Fatalf("wanted successful remove but got: %v", err) + } + + err = w.Remove(testFile) + if err == nil { + t.Fatalf("no error on removing invalid file") + } + + w.mu.Lock() + defer w.mu.Unlock() + if len(w.watches) != 0 { + t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches) + } + if len(w.paths) != 0 { + t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths) + } +} + +func TestInotifyInnerMapLength(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + testFile := filepath.Join(testDir, "testfile") + + handle, err := os.Create(testFile) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + handle.Close() + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testFile) + if err != nil { + t.Fatalf("Failed to add testFile: %v", err) + } + go func() { + for err := range w.Errors { + t.Fatalf("error received: %s", err) + } + }() + + err = os.Remove(testFile) + if err != nil { + t.Fatalf("Failed to remove testFile: %v", err) + } + _ = <-w.Events // consume Remove event + <-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated + + w.mu.Lock() + defer w.mu.Unlock() + if len(w.watches) != 0 { + t.Fatalf("Expected watches len is 0, but got: %d, 
%v", len(w.watches), w.watches) + } + if len(w.paths) != 0 { + t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths) + } +} + +func TestInotifyOverflow(t *testing.T) { + // We need to generate many more events than the + // fs.inotify.max_queued_events sysctl setting. + // We use multiple goroutines (one per directory) + // to speed up file creation. + numDirs := 128 + numFiles := 1024 + + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + for dn := 0; dn < numDirs; dn++ { + testSubdir := fmt.Sprintf("%s/%d", testDir, dn) + + err := os.Mkdir(testSubdir, 0777) + if err != nil { + t.Fatalf("Cannot create subdir: %v", err) + } + + err = w.Add(testSubdir) + if err != nil { + t.Fatalf("Failed to add subdir: %v", err) + } + } + + errChan := make(chan error, numDirs*numFiles) + + for dn := 0; dn < numDirs; dn++ { + testSubdir := fmt.Sprintf("%s/%d", testDir, dn) + + go func() { + for fn := 0; fn < numFiles; fn++ { + testFile := fmt.Sprintf("%s/%d", testSubdir, fn) + + handle, err := os.Create(testFile) + if err != nil { + errChan <- fmt.Errorf("Create failed: %v", err) + continue + } + + err = handle.Close() + if err != nil { + errChan <- fmt.Errorf("Close failed: %v", err) + continue + } + } + }() + } + + creates := 0 + overflows := 0 + + after := time.After(10 * time.Second) + for overflows == 0 && creates < numDirs*numFiles { + select { + case <-after: + t.Fatalf("Not done") + case err := <-errChan: + t.Fatalf("Got an error from file creator goroutine: %v", err) + case err := <-w.Errors: + if err == ErrEventOverflow { + overflows++ + } else { + t.Fatalf("Got an error from watcher: %v", err) + } + case evt := <-w.Events: + if !strings.HasPrefix(evt.Name, testDir) { + t.Fatalf("Got an event for an unknown file: %s", evt.Name) + } + if evt.Op == Create { + creates++ + } + } + } + + if creates == numDirs*numFiles { + t.Fatalf("Could not trigger overflow") + } + + if overflows == 0 { + t.Fatalf("No overflow and not enough creates (expected %d, got %d)", + numDirs*numFiles, creates) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go b/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go new file mode 100644 index 000000000..cd6adc273 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go @@ -0,0 +1,147 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fsnotify + +import ( + "os" + "path/filepath" + "testing" + "time" + + "golang.org/x/sys/unix" +) + +// testExchangedataForWatcher tests the watcher with the exchangedata operation on macOS. +// +// This is widely used for atomic saves on macOS, e.g. TextMate and in Apple's NSDocument. +// +// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html +// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20 +func testExchangedataForWatcher(t *testing.T, watchDir bool) { + // Create directory to watch + testDir1 := tempMkdir(t) + + // For the intermediate file + testDir2 := tempMkdir(t) + + defer os.RemoveAll(testDir1) + defer os.RemoveAll(testDir2) + + resolvedFilename := "TestFsnotifyEvents.file" + + // TextMate does: + // + // 1. exchangedata (intermediate, resolved) + // 2. 
unlink intermediate
+	//
+	// Let's try to simulate that:
+	resolved := filepath.Join(testDir1, resolvedFilename)
+	intermediate := filepath.Join(testDir2, resolvedFilename+"~")
+
+	// Make sure we create the file before we start watching
+	createAndSyncFile(t, resolved)
+
+	watcher := newWatcher(t)
+
+	// Test both variants in isolation
+	if watchDir {
+		addWatch(t, watcher, testDir1)
+	} else {
+		addWatch(t, watcher, resolved)
+	}
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Fatalf("error received: %s", err)
+		}
+	}()
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var removeReceived counter
+	var createReceived counter
+
+	done := make(chan bool)
+
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(resolved) {
+				if event.Op&Remove == Remove {
+					removeReceived.increment()
+				}
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+			}
+			t.Logf("event received: %s", event)
+		}
+		done <- true
+	}()
+
+	// Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
+	for i := 1; i <= 3; i++ {
+		// The intermediate file is created in a folder outside the watcher
+		createAndSyncFile(t, intermediate)
+
+		// 1. Swap
+		if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
+			t.Fatalf("[%d] exchangedata failed: %s", i, err)
+		}
+
+		time.Sleep(50 * time.Millisecond)
+
+		// 2. Delete the intermediate file
+		err := os.Remove(intermediate)
+
+		if err != nil {
+			t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
+		}
+
+		time.Sleep(50 * time.Millisecond)
+
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+
+	// The events will be (CHMOD + REMOVE + CREATE) X 3. Let's focus on the last two:
+	if removeReceived.value() < 3 {
+		t.Fatal("fsnotify remove events have not been received after 500 ms")
+	}
+
+	if createReceived.value() < 3 {
+		t.Fatal("fsnotify create events have not been received after 500 ms")
+	}
+
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+}
+
+// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched dir.
+func TestExchangedataInWatchedDir(t *testing.T) {
+	testExchangedataForWatcher(t, true)
+}
+
+// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
+func TestExchangedataInWatchedFile(t *testing.T) {
+	testExchangedataForWatcher(t, false)
+}
+
+func createAndSyncFile(t *testing.T, filepath string) {
+	f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating %s failed: %s", filepath, err)
+	}
+	f1.Sync()
+	f1.Close()
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/integration_test.go b/vendor/github.com/fsnotify/fsnotify/integration_test.go
new file mode 100644
index 000000000..8b7e9d3ec
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/integration_test.go
@@ -0,0 +1,1237 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
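+
+// These integration tests exercise real file-system operations and rely on
+// short sleeps to give the kernel time to deliver events, so they can be
+// timing-sensitive on slow or heavily loaded machines.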
+ +// +build !plan9,!solaris + +package fsnotify + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "sync/atomic" + "testing" + "time" +) + +// An atomic counter +type counter struct { + val int32 +} + +func (c *counter) increment() { + atomic.AddInt32(&c.val, 1) +} + +func (c *counter) value() int32 { + return atomic.LoadInt32(&c.val) +} + +func (c *counter) reset() { + atomic.StoreInt32(&c.val, 0) +} + +// tempMkdir makes a temporary directory +func tempMkdir(t *testing.T) string { + dir, err := ioutil.TempDir("", "fsnotify") + if err != nil { + t.Fatalf("failed to create test directory: %s", err) + } + return dir +} + +// tempMkFile makes a temporary file. +func tempMkFile(t *testing.T, dir string) string { + f, err := ioutil.TempFile(dir, "fsnotify") + if err != nil { + t.Fatalf("failed to create test file: %v", err) + } + defer f.Close() + return f.Name() +} + +// newWatcher initializes an fsnotify Watcher instance. +func newWatcher(t *testing.T) *Watcher { + watcher, err := NewWatcher() + if err != nil { + t.Fatalf("NewWatcher() failed: %s", err) + } + return watcher +} + +// addWatch adds a watch for a directory +func addWatch(t *testing.T, watcher *Watcher, dir string) { + if err := watcher.Add(dir); err != nil { + t.Fatalf("watcher.Add(%q) failed: %s", dir, err) + } +} + +func TestFsnotifyMultipleOperations(t *testing.T) { + watcher := newWatcher(t) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory that's not watched + testDirToMoveFiles := tempMkdir(t) + defer os.RemoveAll(testDirToMoveFiles) + + testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") + testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile") + + addWatch(t, watcher, testDir) + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived, renameReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Rename == Rename { + renameReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // Modify the file outside of the watched dir + f, err = os.Open(testFileRenamed) + if err != nil { + t.Fatalf("open test renamed file failed: %s", err) + } + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write 
change before delete + + // Recreate the file that was moved + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Close() + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + mReceived := modifyReceived.value() + if mReceived != 1 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) + } + dReceived := deleteReceived.value() + rReceived := renameReceived.value() + if dReceived+rReceived != 1 { + t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyMultipleCreates(t *testing.T) { + watcher := newWatcher(t) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") + + addWatch(t, watcher, testDir) + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + os.Remove(testFile) + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Recreate the file + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Close() + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Modify + f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Modify + f, err = 
os.OpenFile(testFile, os.O_WRONLY, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + mReceived := modifyReceived.value() + if mReceived < 3 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3) + } + dReceived := deleteReceived.value() + if dReceived != 1 { + t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyDirOnly(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + // This should NOT add any events to the fsnotify event queue + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + os.Remove(testFile) + os.Remove(testFileAlreadyExists) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 1 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", 
cReceived, 1) + } + mReceived := modifyReceived.value() + if mReceived != 1 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) + } + dReceived := deleteReceived.value() + if dReceived != 2 { + t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyDeleteWatchedDir(t *testing.T) { + watcher := newWatcher(t) + defer watcher.Close() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + addWatch(t, watcher, testDir) + + // Add a watch for testFile + addWatch(t, watcher, testFileAlreadyExists) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var deleteReceived counter + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + }() + + os.RemoveAll(testDir) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + dReceived := deleteReceived.value() + if dReceived < 2 { + t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived) + } +} + +func TestFsnotifySubDir(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile") + testSubDir := filepath.Join(testDir, "sub") + testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile") + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) { + t.Logf("event received: %s", event) + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Remove == Remove { + deleteReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + addWatch(t, watcher, testDir) + + // Create sub-directory + if err := os.Mkdir(testSubDir, 0777); err != nil { + t.Fatalf("failed to 
create test sub-directory: %s", err) + } + + // Create a file + var f *os.File + f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + // Create a file (Should not see this! we are not watching subdir) + var fs *os.File + fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + fs.Sync() + fs.Close() + + time.Sleep(200 * time.Millisecond) + + // Make sure receive deletes for both file and sub-directory + os.RemoveAll(testSubDir) + os.Remove(testFile1) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + dReceived := deleteReceived.value() + if dReceived != 2 { + t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyRename(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var renameReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { + if event.Op&Rename == Rename { + renameReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + f.WriteString("data") + f.Sync() + f.Close() + + // Add a watch for testFile + addWatch(t, watcher, testFile) + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if renameReceived.value() == 0 { + t.Fatal("fsnotify rename events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func 
TestFsnotifyRenameToCreate(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory to get file + testDirFrom := tempMkdir(t) + defer os.RemoveAll(testDirFrom) + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { + if event.Op&Create == Create { + createReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if createReceived.value() == 0 { + t.Fatal("fsnotify create events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestFsnotifyRenameToOverwrite(t *testing.T) { + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS) + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory to get file + testDirFrom := tempMkdir(t) + defer os.RemoveAll(testDirFrom) + + testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Create a file + var fr *os.File + fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + fr.Sync() + fr.Close() + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var eventReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testFileRenamed) { + eventReceived.increment() + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- 
true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if eventReceived.value() == 0 { + t.Fatal("fsnotify events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestRemovalOfWatch(t *testing.T) { + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + watcher := newWatcher(t) + defer watcher.Close() + + addWatch(t, watcher, testDir) + if err := watcher.Remove(testDir); err != nil { + t.Fatalf("Could not remove the watch: %v\n", err) + } + + go func() { + select { + case ev := <-watcher.Events: + t.Fatalf("We received event: %v\n", ev) + case <-time.After(500 * time.Millisecond): + t.Log("No event received, as expected.") + } + }() + + time.Sleep(200 * time.Millisecond) + // Modify the file outside of the watched dir + f, err := os.Open(testFileAlreadyExists) + if err != nil { + t.Fatalf("Open test file failed: %s", err) + } + f.WriteString("data") + f.Sync() + f.Close() + if err := os.Chmod(testFileAlreadyExists, 0700); err != nil { + t.Fatalf("chmod failed: %s", err) + } + time.Sleep(400 * time.Millisecond) +} + +func TestFsnotifyAttrib(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("attributes don't work on Windows.") + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + // The modifyReceived counter counts IsModify events that are not IsAttrib, + // and the attribReceived counts IsAttrib events (which are also IsModify as + // a consequence). 
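The comment just above is worth unpacking: an attribute change surfaces as a Chmod op, a content change as Write, and a single Event may carry several op bits at once, which is why the test checks each bit independently instead of switching on the whole Op value. A rough sketch of how an external consumer could do the same classification (the import path matches this vendor directory; the helper name and package are ours):

```go
package watchutil

import "github.com/fsnotify/fsnotify"

// classify reports which op bits are set on an event. The backend can set
// several bits on one event (newEvent ORs in a flag per kqueue fflag), so
// each bit is tested independently rather than compared for equality.
func classify(ev fsnotify.Event) (created, written, chmodded, removed, renamed bool) {
	created = ev.Op&fsnotify.Create == fsnotify.Create
	written = ev.Op&fsnotify.Write == fsnotify.Write
	chmodded = ev.Op&fsnotify.Chmod == fsnotify.Chmod
	removed = ev.Op&fsnotify.Remove == fsnotify.Remove
	renamed = ev.Op&fsnotify.Rename == fsnotify.Rename
	return
}
```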
+ var modifyReceived counter + var attribReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Chmod == Chmod { + attribReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + f.WriteString("data") + f.Sync() + f.Close() + + // Add a watch for testFile + addWatch(t, watcher, testFile) + + if err := os.Chmod(testFile, 0700); err != nil { + t.Fatalf("chmod failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here + time.Sleep(500 * time.Millisecond) + if modifyReceived.value() != 0 { + t.Fatal("received an unexpected modify event when creating a test file") + } + if attribReceived.value() == 0 { + t.Fatal("fsnotify attribute events have not received after 500 ms") + } + + // Modifying the contents of the file does not set the attrib flag (although eg. the mtime + // might have been modified). + modifyReceived.reset() + attribReceived.reset() + + f, err = os.OpenFile(testFile, os.O_WRONLY, 0) + if err != nil { + t.Fatalf("reopening test file failed: %s", err) + } + + f.WriteString("more data") + f.Sync() + f.Close() + + time.Sleep(500 * time.Millisecond) + + if modifyReceived.value() != 1 { + t.Fatal("didn't receive a modify event after changing test file contents") + } + + if attribReceived.value() != 0 { + t.Fatal("did receive an unexpected attrib event after changing test file contents") + } + + modifyReceived.reset() + attribReceived.reset() + + // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents + // of the file are not changed though) + if err := os.Chmod(testFile, 0600); err != nil { + t.Fatalf("chmod failed: %s", err) + } + + time.Sleep(500 * time.Millisecond) + + if attribReceived.value() != 1 { + t.Fatal("didn't receive an attribute change after 500ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(1e9): + t.Fatal("event stream was not closed after 1 second") + } + + os.Remove(testFile) +} + +func TestFsnotifyClose(t *testing.T) { + watcher := newWatcher(t) + watcher.Close() + + var done int32 + go func() { + watcher.Close() + atomic.StoreInt32(&done, 1) + }() + + time.Sleep(50e6) // 50 ms + if atomic.LoadInt32(&done) == 0 { + t.Fatal("double Close() test failed: second Close() call didn't return") + } + + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + if err := watcher.Add(testDir); err == nil { + t.Fatal("expected error on Watch() after Close(), got nil") + } +} + +func TestFsnotifyFakeSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("symlinks don't work on Windows.") + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + var 
errorsReceived counter + // Receive errors on the error channel on a separate goroutine + go func() { + for errors := range watcher.Errors { + t.Logf("Received error: %s", errors) + errorsReceived.increment() + } + }() + + // Count the CREATE events received + var createEventsReceived, otherEventsReceived counter + go func() { + for ev := range watcher.Events { + t.Logf("event received: %s", ev) + if ev.Op&Create == Create { + createEventsReceived.increment() + } else { + otherEventsReceived.increment() + } + } + }() + + addWatch(t, watcher, testDir) + + if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil { + t.Fatalf("Failed to create bogus symlink: %s", err) + } + t.Logf("Created bogus symlink") + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + + // Should not be error, just no events for broken links (watching nothing) + if errorsReceived.value() > 0 { + t.Fatal("fsnotify errors have been received.") + } + if otherEventsReceived.value() > 0 { + t.Fatal("fsnotify other events received on the broken link") + } + + // Except for 1 create event (for the link itself) + if createEventsReceived.value() == 0 { + t.Fatal("fsnotify create events were not received after 500 ms") + } + if createEventsReceived.value() > 1 { + t.Fatal("fsnotify more create events received than expected") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() +} + +func TestCyclicSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("symlinks don't work on Windows.") + } + + watcher := newWatcher(t) + + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + link := path.Join(testDir, "link") + if err := os.Symlink(".", link); err != nil { + t.Fatalf("could not make symlink: %v", err) + } + addWatch(t, watcher, testDir) + + var createEventsReceived counter + go func() { + for ev := range watcher.Events { + if ev.Op&Create == Create { + createEventsReceived.increment() + } + } + }() + + if err := os.Remove(link); err != nil { + t.Fatalf("Error removing link: %v", err) + } + + // It would be nice to be able to expect a delete event here, but kqueue has + // no way for us to get events on symlinks themselves, because opening them + // opens an fd to the file to which they point. + + if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil { + t.Fatalf("could not make symlink: %v", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + + if got := createEventsReceived.value(); got == 0 { + t.Errorf("want at least 1 create event got %v", got) + } + + watcher.Close() +} + +// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race. 
+// See https://codereview.appspot.com/103300045/ +// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race +func TestConcurrentRemovalOfWatch(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skip("regression test for race only present on darwin") + } + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + watcher := newWatcher(t) + defer watcher.Close() + + addWatch(t, watcher, testDir) + + // Test that RemoveWatch can be invoked concurrently, with no data races. + removed1 := make(chan struct{}) + go func() { + defer close(removed1) + watcher.Remove(testDir) + }() + removed2 := make(chan struct{}) + go func() { + close(removed2) + watcher.Remove(testDir) + }() + <-removed1 + <-removed2 +} + +func TestClose(t *testing.T) { + // Regression test for #59 bad file descriptor from Close + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + watcher := newWatcher(t) + if err := watcher.Add(testDir); err != nil { + t.Fatalf("Expected no error on Add, got %v", err) + } + err := watcher.Close() + if err != nil { + t.Fatalf("Expected no error on Close, got %v.", err) + } +} + +// TestRemoveWithClose tests if one can handle Remove events and, at the same +// time, close Watcher object without any data races. +func TestRemoveWithClose(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + const fileN = 200 + tempFiles := make([]string, 0, fileN) + for i := 0; i < fileN; i++ { + tempFiles = append(tempFiles, tempMkFile(t, testDir)) + } + watcher := newWatcher(t) + if err := watcher.Add(testDir); err != nil { + t.Fatalf("Expected no error on Add, got %v", err) + } + startC, stopC := make(chan struct{}), make(chan struct{}) + errC := make(chan error) + go func() { + for { + select { + case <-watcher.Errors: + case <-watcher.Events: + case <-stopC: + return + } + } + }() + go func() { + <-startC + for _, fileName := range tempFiles { + os.Remove(fileName) + } + }() + go func() { + <-startC + errC <- watcher.Close() + }() + close(startC) + defer close(stopC) + if err := <-errC; err != nil { + t.Fatalf("Expected no error on Close, got %v.", err) + } +} + +func testRename(file1, file2 string) error { + switch runtime.GOOS { + case "windows", "plan9": + return os.Rename(file1, file2) + default: + cmd := exec.Command("mv", file1, file2) + return cmd.Run() + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 000000000..86e76a3d6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. 
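The tests above all follow one pattern: create a Watcher, Add a path, then drain Events and Errors on separate goroutines until Close() closes the channels. For orientation before the kqueue implementation of that API below, a minimal standalone consumer might look like this (a sketch; the watched path is illustrative):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Non-recursive, exactly like addWatch in the tests above.
	if err := watcher.Add("/tmp/watched-dir"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-watcher.Events:
			if !ok { // channel closed by Close()
				return
			}
			log.Printf("event: %s", ev)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Printf("error: %v", err)
		}
	}
}
```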
+type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by the user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of whether we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths inside this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, there is not much sense in propagating the error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves.
+ w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. 
+ if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do an rm -fr on a recursively watched folder and receive the + // modification event before the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark it as a delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to the next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // the only way the previous loop breaks is if w.done was closed, so we need an async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns a platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles watches all the files in a directory, to mimic inotify when a watch is added on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryChangeEvents searches the directory for newly created files +// and sends them over the event channel.
This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 000000000..7d8de1451 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
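The kqueue backend above reduces to two primitives: register, which arms an EVFILT_VNODE filter with EV_ADD|EV_CLEAR|EV_ENABLE plus the NOTE_* fflags of interest, and read, which blocks on Kevent for at most keventWaitTime so the loop can notice w.done. A condensed sketch of that round trip, which like the backend only builds on the BSDs and darwin (error handling elided, path illustrative; darwin would use O_EVTONLY as openMode per the open_mode files here):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	kq, _ := unix.Kqueue()
	fd, _ := unix.Open("/tmp/watched", unix.O_NONBLOCK|unix.O_RDONLY, 0700)

	// Arm the vnode filter: one changelist entry, same flags as register().
	changes := make([]unix.Kevent_t, 1)
	unix.SetKevent(&changes[0], fd, unix.EVFILT_VNODE,
		unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE)
	changes[0].Fflags = unix.NOTE_DELETE | unix.NOTE_WRITE |
		unix.NOTE_ATTRIB | unix.NOTE_RENAME

	// Poll with the same 100ms timeout read() uses; one Kevent call can
	// both register the change and collect pending events.
	events := make([]unix.Kevent_t, 10)
	timeout := unix.NsecToTimespec((100 * time.Millisecond).Nanoseconds())
	n, _ := unix.Kevent(kq, changes, events, &timeout)

	for _, ev := range events[:n] {
		fmt.Printf("fd=%v fflags=%#x\n", ev.Ident, ev.Fflags)
	}
}
```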
+ +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 000000000..9139e1716 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 000000000..09436f31d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
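A detail that is easy to miss in getIno and the map helpers around here: Windows watches are keyed not by path but by file identity, the (volume serial number, 64-bit file index) pair reported by GetFileInformationByHandle, so two paths reaching the same directory resolve to one watch entry. A self-contained sketch of that keying, with the identity values hard-coded where the real code queries the directory handle:

```go
package main

import "fmt"

// Minimal stand-ins for the backend's types: volume serial -> file index -> watch.
type watch struct{ path string }
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap

func main() {
	// In windows.go these come from syscall.GetFileInformationByHandle.
	var volumeSerial uint32 = 0x2AF0C6E1
	var indexHigh, indexLow uint32 = 0x0001, 0x00A4

	// The two 32-bit halves form the 64-bit NTFS file index.
	index := uint64(indexHigh)<<32 | uint64(indexLow)

	m := make(watchMap)
	byIndex := m[volumeSerial]
	if byIndex == nil { // same lazy init as watchMap.set
		byIndex = make(indexMap)
		m[volumeSerial] = byIndex
	}
	byIndex[index] = &watch{path: `C:\watched\dir`}

	fmt.Println(m[volumeSerial][index].path) // lookup mirrors watchMap.get
}
```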
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md b/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..2d7fc4bf6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,21 @@ +### HCL Template +```hcl +# Place your HCL configuration file here +``` + +### Expected behavior +What should have happened? + +### Actual behavior +What actually happened? + +### Steps to reproduce +1. +2. +3. + +### References +Are there any other GitHub issues (open or closed) that should +be linked here? For example: +- GH-1234 +- ... diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 000000000..15586a2b5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,9 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 000000000..cb63a3216 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,13 @@ +sudo: false + +language: go + +go: + - 1.x + - tip + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile new file mode 100644 index 000000000..84fd743f5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/Makefile @@ -0,0 +1,18 @@ +TEST?=./... + +default: test + +fmt: generate + go fmt ./... + +test: generate + go get -t ./... + go test $(TEST) $(TESTARGS) + +generate: + go generate ./... + +updatedeps: + go get -u golang.org/x/tools/cmd/stringer + +.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md new file mode 100644 index 000000000..c8223326d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/README.md @@ -0,0 +1,125 @@ +# HCL + +[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) + +HCL (HashiCorp Configuration Language) is a configuration language built +by HashiCorp. The goal of HCL is to build a structured configuration language +that is both human and machine friendly for use with command-line tools, but +specifically targeted towards DevOps tools, servers, etc. + +HCL is also fully JSON compatible. That is, JSON can be used as completely +valid input to a system expecting HCL. This helps makes systems +interoperable with other systems. + +HCL is heavily inspired by +[libucl](https://github.com/vstakhov/libucl), +nginx configuration, and others similar. + +## Why? + +A common question when viewing HCL is to ask the question: why not +JSON, YAML, etc.? + +Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) +used a variety of configuration languages from full programming languages +such as Ruby to complete data structure languages such as JSON. What we +learned is that some people wanted human-friendly configuration languages +and some people wanted machine-friendly languages. + +JSON fits a nice balance in this, but is fairly verbose and most +importantly doesn't support comments. With YAML, we found that beginners +had a really hard time determining what the actual structure was, and +ended up guessing more often than not whether to use a hyphen, colon, etc. +in order to represent some configuration key. + +Full programming languages such as Ruby enable complex behavior +a configuration language shouldn't usually allow, and also forces +people to learn some set of Ruby. 
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be any primitive: a string, number, boolean,
+    object, or list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line, and end
+    with `EOF` on its own line (heredoc syntax).
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 000000000..bed9ebbe1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// This is the tag to use with structures to have settings for HCL
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+	val := reflect.ValueOf(out)
+	if val.Kind() != reflect.Ptr {
+		return errors.New("result must be a pointer")
+	}
+
+	// If we have the file, we really decode the root node
+	if f, ok := n.(*ast.File); ok {
+		n = f.Node
+	}
+
+	var d decoder
+	return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+	stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+	k := result
+
+	// If we have an interface with a valid value, we use that
+	// for the check.
+	if result.Kind() == reflect.Interface {
+		elem := result.Elem()
+		if elem.IsValid() {
+			k = elem
+		}
+	}
+
+	// Push current onto stack unless it is an interface.
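+	// The kind stack lets decodeInterface tell whether it is decoding
+	// at the root of the document or directly inside a slice; that is
+	// what decides whether objects become maps or lists of maps.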
+ if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float32, reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. 
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+			set = result
+		}
+	case *ast.ObjectType:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+			set = result
+		}
+	case *ast.ListType:
+		var temp []interface{}
+		tempVal := reflect.ValueOf(temp)
+		result := reflect.MakeSlice(
+			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+		set = result
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL:
+			var result bool
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.FLOAT:
+			var result float64
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.NUMBER:
+			var result int
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.STRING, token.HEREDOC:
+			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+		default:
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+			}
+		}
+	default:
+		return fmt.Errorf(
+			"%s: cannot decode into interface: %T",
+			name, node)
+	}
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+	result.Set(set)
+
+	if redecode {
+		// Revisit the node so that we can use the newly instantiated
+		// thing and populate it.
+		if err := d.decode(name, node, result); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+	if item, ok := node.(*ast.ObjectItem); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	n, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+		}
+	}
+
+	// If we have an interface, then we can address the interface,
+	// but not the map itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	resultKeyType := resultType.Key()
+	if resultKeyType.Kind() != reflect.String {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: map must have string keys", name),
+		}
+	}
+
+	// Make a map if it is nil
+	resultMap := result
+	if result.IsNil() {
+		resultMap = reflect.MakeMap(
+			reflect.MapOf(resultKeyType, resultElemType))
+	}
+
+	// Go through each element and decode it.
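+	// done tracks keys that were already gathered up via Filter below,
+	// so a key that appears multiple times is only decoded once.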
+	done := make(map[string]struct{})
+	for _, item := range n.Items {
+		if item.Val == nil {
+			continue
+		}
+
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
+		// Get the key we're dealing with, which is the first item
+		keyStr := item.Keys[0].Token.Value().(string)
+
+		// If we've already processed this key, then ignore it
+		if _, ok := done[keyStr]; ok {
+			continue
+		}
+
+		// Determine the value. If we have more than one key, then we
+		// get the objectlist of only these keys.
+		itemVal := item.Val
+		if len(item.Keys) > 1 {
+			itemVal = n.Filter(keyStr)
+			done[keyStr] = struct{}{}
+		}
+
+		// Make the field name
+		fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+		// Get the key/value as reflection values
+		key := reflect.ValueOf(keyStr)
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// If we have a pre-existing value in the map, use that
+		oldVal := resultMap.MapIndex(key)
+		if oldVal.IsValid() {
+			val.Set(oldVal)
+		}
+
+		// Decode!
+		if err := d.decode(fieldName, itemVal, val); err != nil {
+			return err
+		}
+
+		// Set the value on the map
+		resultMap.SetMapIndex(key, val)
+	}
+
+	// Set the final map if we can
+	set.Set(resultMap)
+	return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	val := reflect.New(resultElemType)
+	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+		return err
+	}
+
+	result.Set(val)
+	return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+	// Create the slice if it is nil
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	if result.IsNil() {
+		resultSliceType := reflect.SliceOf(resultElemType)
+		result = reflect.MakeSlice(
+			resultSliceType, 0, 0)
+	}
+
+	// Figure out the items we'll be copying into the slice
+	var items []ast.Node
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		items = make([]ast.Node, len(n.Items))
+		for i, item := range n.Items {
+			items[i] = item
+		}
+	case *ast.ObjectType:
+		items = []ast.Node{n}
+	case *ast.ListType:
+		items = n.List
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("unknown slice type: %T", node),
+		}
+	}
+
+	for i, item := range items {
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+		// Decode
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// if item is an object that was decoded from ambiguous JSON and
+		// flattened, make sure it's expanded if it needs to decode into a
+		// defined structure.
+		item := expandObject(item, val)
+
+		if err := d.decode(fieldName, item, val); err != nil {
+			return err
+		}
+
+		// Append it onto the slice
+		result = reflect.Append(result, val)
+	}
+
+	set.Set(result)
+	return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to properly decode.
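+// A flattened item still carries both the object key and the field key in
+// item.Keys; re-wrapping its value in an ObjectType restores the shape the
+// struct decoder expects.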
+func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. + if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. 
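+			// A `,squash` tag merges the embedded struct's fields into
+			// the parent, so they decode as if declared inline.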
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + fieldName := field.Name + + tagValue := field.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, fieldValue) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + fieldValue.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, fieldValue) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. 
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { + return err + } + } + + decodedFields = append(decodedFields, field.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go new file mode 100644 index 000000000..8682f470e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder_test.go @@ -0,0 +1,1203 @@ +package hcl + +import ( + "io/ioutil" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/hcl/ast" +) + +func TestDecode_interface(t *testing.T) { + cases := []struct { + File string + Err bool + Out interface{} + }{ + { + "basic.hcl", + false, + map[string]interface{}{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}", + }, + }, + { + "basic_squish.hcl", + false, + map[string]interface{}{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}", + "foo-bar": "baz", + }, + }, + { + "empty.hcl", + false, + map[string]interface{}{ + "resource": []map[string]interface{}{ + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{}, + }, + }, + }, + }, + }, + { + "tfvars.hcl", + false, + map[string]interface{}{ + "regularvar": "Should work", + "map.key1": "Value", + "map.key2": "Other value", + }, + }, + { + "escape.hcl", + false, + map[string]interface{}{ + "foo": "bar\"baz\\n", + "qux": "back\\slash", + "bar": "new\nline", + "qax": `slash\:colon`, + "nested": `${HH\\:mm\\:ss}`, + "nestedquotes": `${"\"stringwrappedinquotes\""}`, + }, + }, + { + "float.hcl", + false, + map[string]interface{}{ + "a": 1.02, + "b": 2, + }, + }, + { + "multiline_bad.hcl", + true, + nil, + }, + { + "multiline_literal.hcl", + true, + nil, + }, + { + "multiline_literal_with_hil.hcl", + false, + map[string]interface{}{"multiline_literal_with_hil": "${hello\n world}"}, + }, + { + "multiline_no_marker.hcl", + true, + nil, + }, + { + "multiline.hcl", + false, + map[string]interface{}{"foo": "bar\nbaz\n"}, + }, + { + "multiline_indented.hcl", + false, + map[string]interface{}{"foo": " bar\n baz\n"}, + }, + { + "multiline_no_hanging_indent.hcl", + false, + map[string]interface{}{"foo": " baz\n bar\n foo\n"}, + }, + { + "multiline_no_eof.hcl", + false, + map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"}, + }, + { + "multiline.json", + false, + map[string]interface{}{"foo": "bar\nbaz"}, + }, + { + "null_strings.json", + false, + map[string]interface{}{ + "module": []map[string]interface{}{ + map[string]interface{}{ + "app": []map[string]interface{}{ + map[string]interface{}{"foo": ""}, + }, + }, + }, + }, + }, + { + "scientific.json", + false, + map[string]interface{}{ + "a": 1e-10, + "b": 1e+10, + "c": 1e10, + "d": 1.2e-10, + "e": 1.2e+10, + "f": 
1.2e10, + }, + }, + { + "scientific.hcl", + false, + map[string]interface{}{ + "a": 1e-10, + "b": 1e+10, + "c": 1e10, + "d": 1.2e-10, + "e": 1.2e+10, + "f": 1.2e10, + }, + }, + { + "terraform_heroku.hcl", + false, + map[string]interface{}{ + "name": "terraform-test-app", + "config_vars": []map[string]interface{}{ + map[string]interface{}{ + "FOO": "bar", + }, + }, + }, + }, + { + "structure_multi.hcl", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "baz": []map[string]interface{}{ + map[string]interface{}{"key": 7}, + }, + }, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{"key": 12}, + }, + }, + }, + }, + }, + { + "structure_multi.json", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "baz": []map[string]interface{}{ + map[string]interface{}{"key": 7}, + }, + }, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{"key": 12}, + }, + }, + }, + }, + }, + { + "list_of_lists.hcl", + false, + map[string]interface{}{ + "foo": []interface{}{ + []interface{}{"foo"}, + []interface{}{"bar"}, + }, + }, + }, + { + "list_of_maps.hcl", + false, + map[string]interface{}{ + "foo": []interface{}{ + map[string]interface{}{"somekey1": "someval1"}, + map[string]interface{}{"somekey2": "someval2", "someextrakey": "someextraval"}, + }, + }, + }, + { + "assign_deep.hcl", + false, + map[string]interface{}{ + "resource": []interface{}{ + map[string]interface{}{ + "foo": []interface{}{ + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{}}}}}}}, + }, + { + "structure_list.hcl", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "key": 7, + }, + map[string]interface{}{ + "key": 12, + }, + }, + }, + }, + { + "structure_list.json", + false, + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "key": 7, + }, + map[string]interface{}{ + "key": 12, + }, + }, + }, + }, + { + "structure_list_deep.json", + false, + map[string]interface{}{ + "bar": []map[string]interface{}{ + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "name": "terraform_example", + "ingress": []map[string]interface{}{ + map[string]interface{}{ + "from_port": 22, + }, + map[string]interface{}{ + "from_port": 80, + }, + }, + }, + }, + }, + }, + }, + }, + + { + "structure_list_empty.json", + false, + map[string]interface{}{ + "foo": []interface{}{}, + }, + }, + + { + "nested_block_comment.hcl", + false, + map[string]interface{}{ + "bar": "value", + }, + }, + + { + "unterminated_block_comment.hcl", + true, + nil, + }, + + { + "unterminated_brace.hcl", + true, + nil, + }, + + { + "nested_provider_bad.hcl", + true, + nil, + }, + + { + "object_list.json", + false, + map[string]interface{}{ + "resource": []map[string]interface{}{ + map[string]interface{}{ + "aws_instance": []map[string]interface{}{ + map[string]interface{}{ + "db": []map[string]interface{}{ + map[string]interface{}{ + "vpc": "foo", + "provisioner": []map[string]interface{}{ + map[string]interface{}{ + "file": []map[string]interface{}{ + map[string]interface{}{ + "source": "foo", + "destination": "bar", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + // Terraform GH-8295 sanity test that basic decoding into + // interface{} works. 
+ { + "terraform_variable_invalid.json", + false, + map[string]interface{}{ + "variable": []map[string]interface{}{ + map[string]interface{}{ + "whatever": "abc123", + }, + }, + }, + }, + + { + "interpolate.json", + false, + map[string]interface{}{ + "default": `${replace("europe-west", "-", " ")}`, + }, + }, + + { + "block_assign.hcl", + true, + nil, + }, + + { + "escape_backslash.hcl", + false, + map[string]interface{}{ + "output": []map[string]interface{}{ + map[string]interface{}{ + "one": `${replace(var.sub_domain, ".", "\\.")}`, + "two": `${replace(var.sub_domain, ".", "\\\\.")}`, + "many": `${replace(var.sub_domain, ".", "\\\\\\\\.")}`, + }, + }, + }, + }, + + { + "git_crypt.hcl", + true, + nil, + }, + + { + "object_with_bool.hcl", + false, + map[string]interface{}{ + "path": []map[string]interface{}{ + map[string]interface{}{ + "policy": "write", + "permissions": []map[string]interface{}{ + map[string]interface{}{ + "bool": []interface{}{false}, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.File, func(t *testing.T) { + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File)) + if err != nil { + t.Fatalf("err: %s", err) + } + + var out interface{} + err = Decode(&out, string(d)) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.File, err) + } + + if !reflect.DeepEqual(out, tc.Out) { + t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) + } + + var v interface{} + err = Unmarshal(d, &v) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.File, err) + } + + if !reflect.DeepEqual(v, tc.Out) { + t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) + } + }) + } +} + +func TestDecode_interfaceInline(t *testing.T) { + cases := []struct { + Value string + Err bool + Out interface{} + }{ + {"t t e{{}}", true, nil}, + {"t=0t d {}", true, map[string]interface{}{"t": 0}}, + {"v=0E0v d{}", true, map[string]interface{}{"v": float64(0)}}, + } + + for _, tc := range cases { + t.Logf("Testing: %q", tc.Value) + + var out interface{} + err := Decode(&out, tc.Value) + if (err != nil) != tc.Err { + t.Fatalf("Input: %q\n\nError: %s", tc.Value, err) + } + + if !reflect.DeepEqual(out, tc.Out) { + t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out) + } + + var v interface{} + err = Unmarshal([]byte(tc.Value), &v) + if (err != nil) != tc.Err { + t.Fatalf("Input: %q\n\nError: %s", tc.Value, err) + } + + if !reflect.DeepEqual(v, tc.Out) { + t.Fatalf("Input: %q. 
Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out) + } + } +} + +func TestDecode_equal(t *testing.T) { + cases := []struct { + One, Two string + }{ + { + "basic.hcl", + "basic.json", + }, + { + "float.hcl", + "float.json", + }, + /* + { + "structure.hcl", + "structure.json", + }, + */ + { + "structure.hcl", + "structure_flat.json", + }, + { + "terraform_heroku.hcl", + "terraform_heroku.json", + }, + } + + for _, tc := range cases { + p1 := filepath.Join(fixtureDir, tc.One) + p2 := filepath.Join(fixtureDir, tc.Two) + + d1, err := ioutil.ReadFile(p1) + if err != nil { + t.Fatalf("err: %s", err) + } + + d2, err := ioutil.ReadFile(p2) + if err != nil { + t.Fatalf("err: %s", err) + } + + var i1, i2 interface{} + err = Decode(&i1, string(d1)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = Decode(&i2, string(d2)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(i1, i2) { + t.Fatalf( + "%s != %s\n\n%#v\n\n%#v", + tc.One, tc.Two, + i1, i2) + } + } +} + +func TestDecode_flatMap(t *testing.T) { + var val map[string]map[string]string + + err := Decode(&val, testReadFile(t, "structure_flatmap.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]map[string]string{ + "foo": map[string]string{ + "foo": "bar", + "key": "7", + }, + } + + if !reflect.DeepEqual(val, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected) + } +} + +func TestDecode_structure(t *testing.T) { + type Embedded interface{} + + type V struct { + Embedded `hcl:"-"` + Key int + Foo string + } + + var actual V + + err := Decode(&actual, testReadFile(t, "flat.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := V{ + Key: 7, + Foo: "bar", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected) + } +} + +func TestDecode_structurePtr(t *testing.T) { + type V struct { + Key int + Foo string + } + + var actual *V + + err := Decode(&actual, testReadFile(t, "flat.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &V{ + Key: 7, + Foo: "bar", + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected) + } +} + +func TestDecode_structureArray(t *testing.T) { + // This test is extracted from a failure in Consul (consul.io), + // hence the interesting structure naming. 
+ + type KeyPolicyType string + + type KeyPolicy struct { + Prefix string `hcl:",key"` + Policy KeyPolicyType + } + + type Policy struct { + Keys []KeyPolicy `hcl:"key,expand"` + } + + expected := Policy{ + Keys: []KeyPolicy{ + KeyPolicy{ + Prefix: "", + Policy: "read", + }, + KeyPolicy{ + Prefix: "foo/", + Policy: "write", + }, + KeyPolicy{ + Prefix: "foo/bar/", + Policy: "read", + }, + KeyPolicy{ + Prefix: "foo/bar/baz", + Policy: "deny", + }, + }, + } + + files := []string{ + "decode_policy.hcl", + "decode_policy.json", + } + + for _, f := range files { + var actual Policy + + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_sliceExpand(t *testing.T) { + type testInner struct { + Name string `hcl:",key"` + Key string + } + + type testStruct struct { + Services []testInner `hcl:"service,expand"` + } + + expected := testStruct{ + Services: []testInner{ + testInner{ + Name: "my-service-0", + Key: "value", + }, + testInner{ + Name: "my-service-1", + Key: "value", + }, + }, + } + + files := []string{ + "slice_expand.hcl", + } + + for _, f := range files { + t.Logf("Testing: %s", f) + + var actual testStruct + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_structureMap(t *testing.T) { + // This test is extracted from a failure in Terraform (terraform.io), + // hence the interesting structure naming. + + type hclVariable struct { + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + type rawConfig struct { + Variable map[string]hclVariable + } + + expected := rawConfig{ + Variable: map[string]hclVariable{ + "foo": hclVariable{ + Default: "bar", + Description: "bar", + Fields: []string{"Default", "Description"}, + }, + + "amis": hclVariable{ + Default: []map[string]interface{}{ + map[string]interface{}{ + "east": "foo", + }, + }, + Fields: []string{"Default"}, + }, + }, + } + + files := []string{ + "decode_tf_variable.hcl", + "decode_tf_variable.json", + } + + for _, f := range files { + t.Logf("Testing: %s", f) + + var actual rawConfig + err := Decode(&actual, testReadFile(t, f)) + if err != nil { + t.Fatalf("Input: %s\n\nerr: %s", f, err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) + } + } +} + +func TestDecode_structureMapInvalid(t *testing.T) { + // Terraform GH-8295 + + type hclVariable struct { + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + type rawConfig struct { + Variable map[string]*hclVariable + } + + var actual rawConfig + err := Decode(&actual, testReadFile(t, "terraform_variable_invalid.json")) + if err == nil { + t.Fatal("expected error") + } +} + +func TestDecode_interfaceNonPointer(t *testing.T) { + var value interface{} + err := Decode(value, testReadFile(t, "basic_int_string.hcl")) + if err == nil { + t.Fatal("should error") + } +} + +func TestDecode_intString(t *testing.T) { + var value struct { + Count int + } + + err := Decode(&value, testReadFile(t, "basic_int_string.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if value.Count != 3 { + t.Fatalf("bad: %#v", value.Count) + } +} + 
+func TestDecode_float32(t *testing.T) { + var value struct { + A float32 `hcl:"a"` + B float32 `hcl:"b"` + } + + err := Decode(&value, testReadFile(t, "float.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if got, want := value.A, float32(1.02); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } + if got, want := value.B, float32(2); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } +} + +func TestDecode_float64(t *testing.T) { + var value struct { + A float64 `hcl:"a"` + B float64 `hcl:"b"` + } + + err := Decode(&value, testReadFile(t, "float.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if got, want := value.A, float64(1.02); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } + if got, want := value.B, float64(2); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } +} + +func TestDecode_intStringAliased(t *testing.T) { + var value struct { + Count time.Duration + } + + err := Decode(&value, testReadFile(t, "basic_int_string.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if value.Count != time.Duration(3) { + t.Fatalf("bad: %#v", value.Count) + } +} + +func TestDecode_Node(t *testing.T) { + // given + var value struct { + Content ast.Node + Nested struct { + Content ast.Node + } + } + + content := ` +content { + hello = "world" +} +` + + // when + err := Decode(&value, content) + + // then + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + // verify ast.Node can be decoded later + var v map[string]interface{} + err = DecodeObject(&v, value.Content) + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + if v["hello"] != "world" { + t.Errorf("expected mapping to be returned") + } +} + +func TestDecode_NestedNode(t *testing.T) { + // given + var value struct { + Nested struct { + Content ast.Node + } + } + + content := ` +nested "content" { + hello = "world" +} +` + + // when + err := Decode(&value, content) + + // then + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + // verify ast.Node can be decoded later + var v map[string]interface{} + err = DecodeObject(&v, value.Nested.Content) + if err != nil { + t.Errorf("unable to decode content, %v", err) + return + } + + if v["hello"] != "world" { + t.Errorf("expected mapping to be returned") + } +} + +// https://github.com/hashicorp/hcl/issues/60 +func TestDecode_topLevelKeys(t *testing.T) { + type Template struct { + Source string + } + + templates := struct { + Templates []*Template `hcl:"template"` + }{} + + err := Decode(&templates, ` + template { + source = "blah" + } + + template { + source = "blahblah" + }`) + + if err != nil { + t.Fatal(err) + } + + if templates.Templates[0].Source != "blah" { + t.Errorf("bad source: %s", templates.Templates[0].Source) + } + + if templates.Templates[1].Source != "blahblah" { + t.Errorf("bad source: %s", templates.Templates[1].Source) + } +} + +func TestDecode_flattenedJSON(t *testing.T) { + // make sure we can also correctly extract a Name key too + type V struct { + Name string `hcl:",key"` + Description string + Default map[string]string + } + type Vars struct { + Variable []*V + } + + cases := []struct { + JSON string + Out interface{} + Expected interface{} + }{ + { // Nested object, no sibling keys + JSON: ` +{ + "var_name": { + "default": { + "key1": "a", + "key2": "b" + } + } +} + `, + Out: &[]*V{}, + Expected: &[]*V{ + &V{ + Name: "var_name", + Default: map[string]string{"key1": "a", "key2": 
"b"}, + }, + }, + }, + + { // Nested object with a sibling key (this worked previously) + JSON: ` +{ + "var_name": { + "description": "Described", + "default": { + "key1": "a", + "key2": "b" + } + } +} + `, + Out: &[]*V{}, + Expected: &[]*V{ + &V{ + Name: "var_name", + Description: "Described", + Default: map[string]string{"key1": "a", "key2": "b"}, + }, + }, + }, + + { // Multiple nested objects, one with a sibling key + JSON: ` +{ + "variable": { + "var_1": { + "default": { + "key1": "a", + "key2": "b" + } + }, + "var_2": { + "description": "Described", + "default": { + "key1": "a", + "key2": "b" + } + } + } +} + `, + Out: &Vars{}, + Expected: &Vars{ + Variable: []*V{ + &V{ + Name: "var_1", + Default: map[string]string{"key1": "a", "key2": "b"}, + }, + &V{ + Name: "var_2", + Description: "Described", + Default: map[string]string{"key1": "a", "key2": "b"}, + }, + }, + }, + }, + + { // Nested object to maps + JSON: ` +{ + "variable": { + "var_name": { + "description": "Described", + "default": { + "key1": "a", + "key2": "b" + } + } + } +} + `, + Out: &[]map[string]interface{}{}, + Expected: &[]map[string]interface{}{ + { + "variable": []map[string]interface{}{ + { + "var_name": []map[string]interface{}{ + { + "description": "Described", + "default": []map[string]interface{}{ + { + "key1": "a", + "key2": "b", + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { // Nested object to maps without a sibling key should decode the same as above + JSON: ` +{ + "variable": { + "var_name": { + "default": { + "key1": "a", + "key2": "b" + } + } + } +} + `, + Out: &[]map[string]interface{}{}, + Expected: &[]map[string]interface{}{ + { + "variable": []map[string]interface{}{ + { + "var_name": []map[string]interface{}{ + { + "default": []map[string]interface{}{ + { + "key1": "a", + "key2": "b", + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { // Nested objects, one with a sibling key, and one without + JSON: ` +{ + "variable": { + "var_1": { + "default": { + "key1": "a", + "key2": "b" + } + }, + "var_2": { + "description": "Described", + "default": { + "key1": "a", + "key2": "b" + } + } + } +} + `, + Out: &[]map[string]interface{}{}, + Expected: &[]map[string]interface{}{ + { + "variable": []map[string]interface{}{ + { + "var_1": []map[string]interface{}{ + { + "default": []map[string]interface{}{ + { + "key1": "a", + "key2": "b", + }, + }, + }, + }, + }, + }, + }, + { + "variable": []map[string]interface{}{ + { + "var_2": []map[string]interface{}{ + { + "description": "Described", + "default": []map[string]interface{}{ + { + "key1": "a", + "key2": "b", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range cases { + err := Decode(tc.Out, tc.JSON) + if err != nil { + t.Fatalf("[%d] err: %s", i, err) + } + + if !reflect.DeepEqual(tc.Out, tc.Expected) { + t.Fatalf("[%d]\ngot: %s\nexpected: %s\n", i, spew.Sdump(tc.Out), spew.Sdump(tc.Expected)) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 000000000..575a20b50 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. 
By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 000000000..6e5ef654b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. 
It can be an object, list, number, bool or a
+	// string. If key length is larger than one, val can only be of type
+	// Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	// I'm not entirely sure what causes this, but removing this causes
+	// a test failure. We should investigate at some point.
+	if len(o.Keys) == 0 {
+		return token.Pos{}
+	}
+
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents an HCL list type
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL object type
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /* style comment
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
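+//
+// For example (an illustrative note, not upstream text), the following two
+// lines form a single CommentGroup because no blank line separates them:
+//
+//	// first comment
+//	// second comment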
+type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go new file mode 100644 index 000000000..d4364a10f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go @@ -0,0 +1,200 @@ +package ast + +import ( + "reflect" + "strings" + "testing" + + "github.com/hashicorp/hcl/hcl/token" +) + +func TestObjectListFilter(t *testing.T) { + var cases = []struct { + Filter []string + Input []*ObjectItem + Output []*ObjectItem + }{ + { + []string{"foo"}, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{ + Token: token.Token{Type: token.STRING, Text: `"foo"`}, + }, + }, + }, + }, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{}, + }, + }, + }, + + { + []string{"foo"}, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + }, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + }, + }, + } + + for _, tc := range cases { + input := &ObjectList{Items: tc.Input} + expected := &ObjectList{Items: tc.Output} + if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) { + t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual) + } + } +} + +func TestWalk(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}}, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + order := []string{ + "*ast.ObjectList", + "*ast.ObjectItem", + "*ast.ObjectKey", + "*ast.ObjectKey", + "*ast.LiteralType", + "*ast.ObjectItem", + "*ast.ObjectKey", + } + count := 0 + + Walk(node, func(n Node) (Node, bool) { + if n == nil { + return n, false + } + + typeName := reflect.TypeOf(n).String() + if order[count] != typeName { + t.Errorf("expected '%s' got: '%s'", order[count], typeName) + } + count++ + return n, true + }) +} + +func TestWalkEquality(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + rewritten := Walk(node, func(n Node) (Node, bool) { return n, true }) + + newNode, ok := rewritten.(*ObjectList) + if !ok { + t.Fatalf("expected Objectlist, got %T", rewritten) + } + + if !reflect.DeepEqual(node, newNode) { + t.Fatal("rewritten node is not equal to the given node") + } + + if 
len(newNode.Items) != 2 {
+		t.Errorf("expected newNode length 2, got: %d", len(newNode.Items))
+	}
+
+	expected := []string{
+		`"foo"`,
+		`"bar"`,
+	}
+
+	for i, item := range newNode.Items {
+		if len(item.Keys) != 1 {
+			t.Errorf("expected newNode key length 1, got: %d", len(item.Keys))
+		}
+
+		if item.Keys[0].Token.Text != expected[i] {
+			t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
+		}
+
+		if item.Val != nil {
+			t.Errorf("expected item value to be nil")
+		}
+	}
+}
+
+func TestWalkRewrite(t *testing.T) {
+	items := []*ObjectItem{
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+			},
+		},
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+			},
+		},
+	}
+
+	node := &ObjectList{Items: items}
+
+	suffix := "_example"
+	node = Walk(node, func(n Node) (Node, bool) {
+		switch i := n.(type) {
+		case *ObjectKey:
+			i.Token.Text = i.Token.Text + suffix
+			n = i
+		}
+		return n, true
+	}).(*ObjectList)
+
+	Walk(node, func(n Node) (Node, bool) {
+		switch i := n.(type) {
+		case *ObjectKey:
+			if !strings.HasSuffix(i.Token.Text, suffix) {
+				t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
+			}
+		}
+		return n, true
+	})
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 000000000..ba07ad42b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// node returned by the function can be used to rewrite the AST. Walking stops
+// if the returned bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// node returned by fn can be used to rewrite the node that was passed to fn.
+func Walk(node Node, fn WalkFunc) Node {
+	rewritten, ok := fn(node)
+	if !ok {
+		return rewritten
+	}
+
+	switch n := node.(type) {
+	case *File:
+		n.Node = Walk(n.Node, fn)
+	case *ObjectList:
+		for i, item := range n.Items {
+			n.Items[i] = Walk(item, fn).(*ObjectItem)
+		}
+	case *ObjectKey:
+		// nothing to do
+	case *ObjectItem:
+		for i, k := range n.Keys {
+			n.Keys[i] = Walk(k, fn).(*ObjectKey)
+		}
+
+		if n.Val != nil {
+			n.Val = Walk(n.Val, fn)
+		}
+	case *LiteralType:
+		// nothing to do
+	case *ListType:
+		for i, l := range n.List {
+			n.List[i] = Walk(l, fn)
+		}
+	case *ObjectType:
+		n.List = Walk(n.List, fn).(*ObjectList)
+	default:
+		// should we panic here?
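+		// Note (added for clarity): unknown node types are only reported
+		// on stdout here, so a partially rewritten tree is still returned
+		// to the caller rather than panicking.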
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go new file mode 100644 index 000000000..2380d71e3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go @@ -0,0 +1,162 @@ +// Derivative work from: +// - https://golang.org/src/cmd/gofmt/gofmt.go +// - https://github.com/fatih/hclfmt + +package fmtcmd + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/hcl/printer" +) + +var ( + ErrWriteStdin = errors.New("cannot use write option with standard input") +) + +type Options struct { + List bool // list files whose formatting differs + Write bool // write result to (source) file instead of stdout + Diff bool // display diffs of formatting changes +} + +func isValidFile(f os.FileInfo, extensions []string) bool { + if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") { + for _, ext := range extensions { + if strings.HasSuffix(f.Name(), "."+ext) { + return true + } + } + } + + return false +} + +// If in == nil, the source is the contents of the file with the given filename. +func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error { + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + res, err := printer.Format(src) + if err != nil { + return fmt.Errorf("In %s: %s", filename, err) + } + + if !bytes.Equal(src, res) { + // formatting has changed + if opts.List { + fmt.Fprintln(out, filename) + } + if opts.Write { + err = ioutil.WriteFile(filename, res, 0644) + if err != nil { + return err + } + } + if opts.Diff { + data, err := diff(src, res) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename) + out.Write(data) + } + } + + if !opts.List && !opts.Write && !opts.Diff { + _, err = out.Write(res) + } + + return err +} + +func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error { + visitFile := func(path string, f os.FileInfo, err error) error { + if err == nil && isValidFile(f, extensions) { + err = processFile(path, nil, stdout, false, opts) + } + return err + } + + return filepath.Walk(path, visitFile) +} + +func Run( + paths, extensions []string, + stdin io.Reader, + stdout io.Writer, + opts Options, +) error { + if len(paths) == 0 { + if opts.Write { + return ErrWriteStdin + } + if err := processFile("", stdin, stdout, true, opts); err != nil { + return err + } + return nil + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + return err + case dir.IsDir(): + if err := walkDir(path, extensions, stdout, opts); err != nil { + return err + } + default: + if err := processFile(path, nil, stdout, false, opts); err != nil { + return err + } + } + } + + return nil +} + +func diff(b1, b2 []byte) (data []byte, err error) { + f1, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. 
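+	// (With GNU diff, exit status 1 means the inputs differ, while a
+	// status greater than 1 signals a real error.)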
+ // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go new file mode 100644 index 000000000..66bed581c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go @@ -0,0 +1,440 @@ +// +build !windows +// TODO(jen20): These need fixing on Windows but fmt is not used right now +// and red CI is making it harder to process other bugs, so ignore until +// we get around to fixing them. + +package fmtcmd + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "syscall" + "testing" + + "github.com/hashicorp/hcl/testhelper" +) + +var fixtureExtensions = []string{"hcl"} + +func init() { + sort.Sort(ByFilename(fixtures)) +} + +func TestIsValidFile(t *testing.T) { + const fixtureDir = "./test-fixtures" + + cases := []struct { + Path string + Expected bool + }{ + {"good.hcl", true}, + {".hidden.ignore", false}, + {"file.ignore", false}, + {"dir.ignore", false}, + } + + for _, tc := range cases { + file, err := os.Stat(filepath.Join(fixtureDir, tc.Path)) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if res := isValidFile(file, fixtureExtensions); res != tc.Expected { + t.Errorf("want: %t, got: %t", tc.Expected, res) + } + } +} + +func TestRunMultiplePaths(t *testing.T) { + path1, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path1) + path2, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path2) + + var expectedOut bytes.Buffer + for _, path := range []string{path1, path2} { + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") + } + } + } + + _, stdout := mockIO() + err = Run( + []string{path1, path2}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) + } +} + +func TestRunSubDirectories(t *testing.T) { + pathParent, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(pathParent) + + path1, err := renderFixtures(pathParent) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + path2, err := renderFixtures(pathParent) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + paths := []string{path1, path2} + sort.Strings(paths) + + var expectedOut bytes.Buffer + for _, path := range paths { + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") + } + } + } + + _, stdout := mockIO() + err = Run( + []string{pathParent}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) + } +} + +func TestRunStdin(t *testing.T) { + var expectedOut bytes.Buffer + for i, fixture := range fixtures { + if i != 0 { + expectedOut.WriteString("\n") + } + expectedOut.Write(fixture.golden) + } + + stdin, stdout := mockIO() + for _, fixture := range fixtures { + 
stdin.Write(fixture.input) + } + + err := Run( + []string{}, + fixtureExtensions, + stdin, stdout, + Options{}, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) + } +} + +func TestRunStdinAndWrite(t *testing.T) { + var expectedOut = []byte{} + + stdin, stdout := mockIO() + stdin.WriteString("") + err := Run( + []string{}, []string{}, + stdin, stdout, + Options{ + Write: true, + }, + ) + + if err != ErrWriteStdin { + t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err) + } + if !bytes.Equal(stdout.Bytes(), expectedOut) { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunFileError(t *testing.T) { + path, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + filename := filepath.Join(path, "unreadable.hcl") + + var expectedError = &os.PathError{ + Op: "open", + Path: filename, + Err: syscall.EACCES, + } + + err = ioutil.WriteFile(filename, []byte{}, 0000) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{}, + ) + + if !reflect.DeepEqual(err, expectedError) { + t.Errorf("error want: %#v, got: %#v", expectedError, err) + } +} + +func TestRunNoOptions(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + expectedOut.Write(fixture.golden) + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{}, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) + } +} + +func TestRunList(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename))) + } + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) + } +} + +func TestRunWrite(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + Write: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + for _, fixture := range fixtures { + res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename)) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !bytes.Equal(res, fixture.golden) { + t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res) + } + } +} + +func TestRunDiff(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, 
fixture := range fixtures { + if len(fixture.diff) > 0 { + expectedOut.WriteString( + regexp.QuoteMeta( + fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename), + ), + ) + // Need to use regex to ignore datetimes in diff. + expectedOut.WriteString(`--- .+?\n`) + expectedOut.WriteString(`\+\+\+ .+?\n`) + expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff))) + } + } + + expectedOutString := testhelper.Unix2dos(expectedOut.String()) + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + Diff: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) { + t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout) + } +} + +func mockIO() (stdin, stdout *bytes.Buffer) { + return new(bytes.Buffer), new(bytes.Buffer) +} + +type fixture struct { + filename string + input, golden, diff []byte +} + +type ByFilename []fixture + +func (s ByFilename) Len() int { return len(s) } +func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) } + +var fixtures = []fixture{ + { + "noop.hcl", + []byte(`resource "aws_security_group" "firewall" { + count = 5 +} +`), + []byte(`resource "aws_security_group" "firewall" { + count = 5 +} +`), + []byte(``), + }, { + "align_equals.hcl", + []byte(`variable "foo" { + default = "bar" + description = "bar" +} +`), + []byte(`variable "foo" { + default = "bar" + description = "bar" +} +`), + []byte(`@@ -1,4 +1,4 @@ + variable "foo" { +- default = "bar" ++ default = "bar" + description = "bar" + } +`), + }, { + "indentation.hcl", + []byte(`provider "aws" { + access_key = "foo" + secret_key = "bar" +} +`), + []byte(`provider "aws" { + access_key = "foo" + secret_key = "bar" +} +`), + []byte(`@@ -1,4 +1,4 @@ + provider "aws" { +- access_key = "foo" +- secret_key = "bar" ++ access_key = "foo" ++ secret_key = "bar" + } +`), + }, +} + +// parent can be an empty string, in which case the system's default +// temporary directory will be used. 
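+//
+// A typical call, mirroring the tests above (illustrative sketch):
+//
+//	path, err := renderFixtures("")
+//	if err != nil {
+//		t.Errorf("unexpected error: %s", err)
+//	}
+//	defer os.RemoveAll(path)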
+func renderFixtures(parent string) (path string, err error) { + path, err = ioutil.TempDir(parent, "") + if err != nil { + return "", err + } + + for _, fixture := range fixtures { + err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644) + if err != nil { + os.RemoveAll(path) + return "", err + } + } + + return path, nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore new file mode 100644 index 000000000..9977a2836 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore @@ -0,0 +1 @@ +invalid diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore new file mode 100644 index 000000000..9977a2836 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore @@ -0,0 +1 @@ +invalid diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 000000000..5c99381df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go new file mode 100644 index 000000000..32399fec5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go @@ -0,0 +1,9 @@ +package parser + +import ( + "testing" +) + +func TestPosError_impl(t *testing.T) { + var _ error = new(PosError) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 000000000..64c83bcfb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,532 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
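+//
+// A minimal, illustrative use from outside this package (names as vendored
+// here):
+//
+//	file, err := parser.Parse([]byte("foo = 1"))
+//	if err != nil {
+//		// typically a *parser.PosError pointing at the offending token
+//	}
+//	list := file.Node.(*ast.ObjectList)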
+func Parse(src []byte) (*ast.File, error) {
+	// normalize all line endings
+	// since the scanner and output only work with "\n" line endings, we may
+	// end up with dangling "\r" characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the full source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+	}
+
+	f.Node, err = p.objectList(false)
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	f.Comments = p.comments
+	return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because we might want to use the
+		// already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// object lists can be optionally comma-delimited, e.g. when a list of
+		// maps is being expressed, so a comma is allowed here - it's simply
+		// consumed
+		tok := p.scan()
+		if tok.Type != token.COMMA {
+			p.unscan()
+		}
+	}
+	return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+	endline = p.tok.Pos.Line
+
+	// count the endline if it's a multiline comment, i.e. starting with /*
+	if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.tok.Text); i++ {
+			if p.tok.Text[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+	p.tok = p.sc.Scan()
+	return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.tok.Pos.Line
+
+	for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	// add the comment group to the comments list
+	comments = &ast.CommentGroup{List: list}
+	p.comments = append(p.comments, comments)
+
+	return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if len(keys) > 0 && err == errEofToken {
+		// We ignore the eof token here since it is an error if we didn't
+		// receive a value (but we did receive a key) for the item.
+		err = nil
+	}
+	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+		// This is a strange boolean statement, but what it means is:
+		// we have keys with no value, and we're likely in an object
+		// (since RBRACE ends an object). For this, we set err to nil so
+		// we continue and get the error below of having the wrong value
+		// type.
+		err = nil
+
+		// Reset the token type so we don't think it completed fine.
See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")), + } + } + + // key=#comment + // val + if p.lineComment != nil { + o.LineComment, p.lineComment = p.lineComment, nil + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
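+//
+// Illustratively, each right-hand side below is dispatched by this method:
+//
+//	a = 42        // number literal
+//	b = "str"     // string literal
+//	c = [1, 2]    // list
+//	d = { e = 1 } // object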
+func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), + } + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the 
underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go new file mode 100644 index 000000000..270212207 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go @@ -0,0 +1,575 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +func TestType(t *testing.T) { + var literals = []struct { + typ token.Type + src string + }{ + {token.STRING, `foo = "foo"`}, + {token.NUMBER, `foo = 123`}, + {token.NUMBER, `foo = -29`}, + {token.FLOAT, `foo = 123.12`}, + {token.FLOAT, `foo = -123.12`}, + {token.BOOL, `foo = true`}, + {token.HEREDOC, "foo = <= 0 { + result = p.heredocIndent(result) + } + } + + return result +} + +// objectItem returns the printable HCL form of an object item. An object type +// starts with one/multiple keys and has a value. The value might be of any +// type. 
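+//
+// Illustrative shapes handled here (schematic, not upstream text):
+//
+//	foo = "bar"          // a single key with an assignment
+//	resource "x" { ... } // multiple keys followed by a nested object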
+func (p *printer) objectItem(o *ast.ObjectItem) []byte { + defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) + var buf bytes.Buffer + + if o.LeadComment != nil { + for _, comment := range o.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + // If key and val are on different lines, treat line comments like lead comments. + if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { + for _, comment := range o.LineComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range o.Keys { + buf.WriteString(k.Token.Text) + buf.WriteByte(blank) + + // reach end of key + if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + buf.Write(p.output(o.Val)) + + if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { + buf.WriteByte(blank) + for _, comment := range o.LineComment.List { + buf.WriteString(comment.Text) + } + } + + return buf.Bytes() +} + +// objectType returns the printable HCL form of an object type. An object type +// begins with a brace and ends with a brace. +func (p *printer) objectType(o *ast.ObjectType) []byte { + defer un(trace(p, "ObjectType")) + var buf bytes.Buffer + buf.WriteString("{") + + var index int + var nextItem token.Pos + var commented, newlinePrinted bool + for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is the closing brace + if index != len(o.List.Items) { + nextItem = o.List.Items[index].Pos() + } else { + nextItem = o.Rbrace + } + + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. + for _, c := range p.standaloneComments { + printed := false + var lastCommentPos token.Pos + for _, comment := range c.List { + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). + // And before the next item. + if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { + // If there are standalone comments and the initial newline has not + // been printed yet, do it now. + if !newlinePrinted { + newlinePrinted = true + buf.WriteByte(newline) + } + + // add newline if it's between other printed nodes + if index > 0 { + commented = true + buf.WriteByte(newline) + } + + // Store this position + lastCommentPos = comment.Pos() + + // output the comment itself + buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) + + // Set printed to true to note that we printed something + printed = true + + /* + if index != len(o.List.Items) { + buf.WriteByte(newline) // do not print on the end + } + */ + } + } + + // Stuff to do if we had comments + if printed { + // Always write a newline + buf.WriteByte(newline) + + // If there is another item in the object and our comment + // didn't hug it directly, then make sure there is a blank + // line separating them. + if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { + buf.WriteByte(newline) + } + } + } + + if index == len(o.List.Items) { + p.prev = o.Rbrace + break + } + + // At this point we are sure that it's not a totally empty block: print + // the initial newline if it hasn't been printed yet by the previous + // block about standalone comments. + if !newlinePrinted { + buf.WriteByte(newline) + newlinePrinted = true + } + + // check if we have adjacent one liner items. 
If yes we'll going to align + // the comments. + var aligned []*ast.ObjectItem + for _, item := range o.List.Items[index:] { + // we don't group one line lists + if len(o.List.Items) == 1 { + break + } + + // one means a oneliner with out any lead comment + // two means a oneliner with lead comment + // anything else might be something else + cur := lines(string(p.objectItem(item))) + if cur > 2 { + break + } + + curPos := item.Pos() + + nextPos := token.Pos{} + if index != len(o.List.Items)-1 { + nextPos = o.List.Items[index+1].Pos() + } + + prevPos := token.Pos{} + if index != 0 { + prevPos = o.List.Items[index-1].Pos() + } + + // fmt.Println("DEBUG ----------------") + // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) + // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) + // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) + + if curPos.Line+1 == nextPos.Line { + aligned = append(aligned, item) + index++ + continue + } + + if curPos.Line-1 == prevPos.Line { + aligned = append(aligned, item) + index++ + + // finish if we have a new line or comment next. This happens + // if the next item is not adjacent + if curPos.Line+1 != nextPos.Line { + break + } + continue + } + + break + } + + // put newlines if the items are between other non aligned items. + // newlines are also added if there is a standalone comment already, so + // check it too + if !commented && index != len(aligned) { + buf.WriteByte(newline) + } + + if len(aligned) >= 1 { + p.prev = aligned[len(aligned)-1].Pos() + + items := p.alignedItems(aligned) + buf.Write(p.indent(items)) + } else { + p.prev = o.List.Items[index].Pos() + + buf.Write(p.indent(p.objectItem(o.List.Items[index]))) + index++ + } + + buf.WriteByte(newline) + } + + buf.WriteString("}") + return buf.Bytes() +} + +func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { + var buf bytes.Buffer + + // find the longest key and value length, needed for alignment + var longestKeyLen int // longest key length + var longestValLen int // longest value length + for _, item := range items { + key := len(item.Keys[0].Token.Text) + val := len(p.output(item.Val)) + + if key > longestKeyLen { + longestKeyLen = key + } + + if val > longestValLen { + longestValLen = val + } + } + + for i, item := range items { + if item.LeadComment != nil { + for _, comment := range item.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range item.Keys { + keyLen := len(k.Token.Text) + buf.WriteString(k.Token.Text) + for i := 0; i < longestKeyLen-keyLen+1; i++ { + buf.WriteByte(blank) + } + + // reach end of key + if i == len(item.Keys)-1 && len(item.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + val := p.output(item.Val) + valLen := len(val) + buf.Write(val) + + if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { + for i := 0; i < longestValLen-valLen+1; i++ { + buf.WriteByte(blank) + } + + for _, comment := range item.LineComment.List { + buf.WriteString(comment.Text) + } + } + + // do not print for the last item + if i != len(items)-1 { + buf.WriteByte(newline) + } + } + + return buf.Bytes() +} + +// list returns the printable HCL form of an list type. 
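+//
+// Depending on isSingleLineList below, a list is printed either inline,
+//
+//	["a", "b"]
+//
+// or one element per line when comments or multi-line content are involved
+// (illustrative):
+//
+//	[
+//	  "a", # first
+//	  "b",
+//	]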
+func (p *printer) list(l *ast.ListType) []byte { + if p.isSingleLineList(l) { + return p.singleLineList(l) + } + + var buf bytes.Buffer + buf.WriteString("[") + buf.WriteByte(newline) + + var longestLine int + for _, item := range l.List { + // for now we assume that the list only contains literal types + if lit, ok := item.(*ast.LiteralType); ok { + lineLen := len(lit.Token.Text) + if lineLen > longestLine { + longestLine = lineLen + } + } + } + + haveEmptyLine := false + for i, item := range l.List { + // If we have a lead comment, then we want to write that first + leadComment := false + if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { + leadComment = true + + // Ensure an empty line before every element with a + // lead comment (except the first item in a list). + if !haveEmptyLine && i != 0 { + buf.WriteByte(newline) + } + + for _, comment := range lit.LeadComment.List { + buf.Write(p.indent([]byte(comment.Text))) + buf.WriteByte(newline) + } + } + + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(p.indent(val)) + + // if this item is a heredoc, then we output the comma on + // the next line. This is the only case this happens. + comma := []byte{','} + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + buf.WriteByte(newline) + comma = p.indent(comma) + } + + buf.Write(comma) + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + + buf.WriteByte(newline) + + // Ensure an empty line after every element with a + // lead comment (except the first item in a list). + haveEmptyLine = leadComment && i != len(l.List)-1 + if haveEmptyLine { + buf.WriteByte(newline) + } + } + + buf.WriteString("]") + return buf.Bytes() +} + +// isSingleLineList returns true if: +// * they were previously formatted entirely on one line +// * they consist entirely of literals +// * there are either no heredoc strings or the list has exactly one element +// * there are no line comments +func (printer) isSingleLineList(l *ast.ListType) bool { + for _, item := range l.List { + if item.Pos().Line != l.Lbrack.Line { + return false + } + + lit, ok := item.(*ast.LiteralType) + if !ok { + return false + } + + if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { + return false + } + + if lit.LineComment != nil { + return false + } + } + + return true +} + +// singleLineList prints a simple single line list. +// For a definition of "simple", see isSingleLineList above. +func (p *printer) singleLineList(l *ast.ListType) []byte { + buf := &bytes.Buffer{} + + buf.WriteString("[") + for i, item := range l.List { + if i != 0 { + buf.WriteString(", ") + } + + // Output the item itself + buf.Write(p.output(item)) + + // The heredoc marker needs to be at the end of line. 
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + buf.WriteByte(newline) + } + } + + buf.WriteString("]") + return buf.Bytes() +} + +// indent indents the lines of the given buffer for each non-empty line +func (p *printer) indent(buf []byte) []byte { + var prefix []byte + if p.cfg.SpacesWidth != 0 { + for i := 0; i < p.cfg.SpacesWidth; i++ { + prefix = append(prefix, blank) + } + } else { + prefix = []byte{tab} + } + + var res []byte + bol := true + for _, c := range buf { + if bol && c != '\n' { + res = append(res, prefix...) + } + + res = append(res, c) + bol = c == '\n' + } + return res +} + +// unindent removes all the indentation from the tombstoned lines +func (p *printer) unindent(buf []byte) []byte { + var res []byte + for i := 0; i < len(buf); i++ { + skip := len(buf)-i <= len(unindent) + if !skip { + skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) + } + if skip { + res = append(res, buf[i]) + continue + } + + // We have a marker. we have to backtrace here and clean out + // any whitespace ahead of our tombstone up to a \n + for j := len(res) - 1; j >= 0; j-- { + if res[j] == '\n' { + break + } + + res = res[:j] + } + + // Skip the entire unindent marker + i += len(unindent) - 1 + } + + return res +} + +// heredocIndent marks all the 2nd and further lines as unindentable +func (p *printer) heredocIndent(buf []byte) []byte { + var res []byte + bol := false + for _, c := range buf { + if bol && c != '\n' { + res = append(res, unindent...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// isSingleLineObject tells whether the given object item is a single +// line object such as "obj {}". +// +// A single line object: +// +// * has no lead comments (hence multi-line) +// * has no assignment +// * has no values in the stanza (within {}) +// +func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { + // If there is a lead comment, can't be one line + if val.LeadComment != nil { + return false + } + + // If there is assignment, we always break by line + if val.Assign.IsValid() { + return false + } + + // If it isn't an object type, then its not a single line object + ot, ok := val.Val.(*ast.ObjectType) + if !ok { + return false + } + + // If the object has no items, it is single line! + return len(ot.List.Items) == 0 +} + +func lines(txt string) int { + endline := 1 + for i := 0; i < len(txt); i++ { + if txt[i] == '\n' { + endline++ + } + } + return endline +} + +// ---------------------------------------------------------------------------- +// Tracing support + +func (p *printer) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + i := 2 * p.indentTrace + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *printer, msg string) *printer { + p.printTrace(msg, "(") + p.indentTrace++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *printer) { + p.indentTrace-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go new file mode 100644 index 000000000..6617ab8e7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -0,0 +1,66 @@ +// Package printer implements printing of AST nodes to HCL format. 
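+//
+// A minimal, illustrative use (not part of the upstream file):
+//
+//	formatted, err := printer.Format([]byte("foo=1"))
+//	// formatted now holds the canonical form: foo = 1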
+package printer + +import ( + "bytes" + "io" + "text/tabwriter" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" +) + +var DefaultConfig = Config{ + SpacesWidth: 2, +} + +// A Config node controls the output of Fprint. +type Config struct { + SpacesWidth int // if set, it will use spaces instead of tabs for alignment +} + +func (c *Config) Fprint(output io.Writer, node ast.Node) error { + p := &printer{ + cfg: *c, + comments: make([]*ast.CommentGroup, 0), + standaloneComments: make([]*ast.CommentGroup, 0), + // enableTrace: true, + } + + p.collectComments(node) + + if _, err := output.Write(p.unindent(p.output(node))); err != nil { + return err + } + + // flush tabwriter, if any + var err error + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return err +} + +// Fprint "pretty-prints" an HCL node to output +// It calls Config.Fprint with default settings. +func Fprint(output io.Writer, node ast.Node) error { + return DefaultConfig.Fprint(output, node) +} + +// Format formats src HCL and returns the result. +func Format(src []byte) ([]byte, error) { + node, err := parser.Parse(src) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + if err := DefaultConfig.Fprint(&buf, node); err != nil { + return nil, err + } + + // Add trailing newline to result + buf.WriteString("\n") + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go new file mode 100644 index 000000000..8ae747ab8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go @@ -0,0 +1,176 @@ +package printer + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/hashicorp/hcl/hcl/parser" +) + +var update = flag.Bool("update", false, "update golden files") + +const ( + dataDir = "testdata" +) + +type entry struct { + source, golden string +} + +// Use go test -update to create/update the respective golden files. 
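+//
+// Each entry below pairs an input file under testdata/ with the golden file
+// holding the expected formatter output; after changing the printer, the
+// golden files can be regenerated with:
+//
+//	go test -update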
+var data = []entry{ + {"complexhcl.input", "complexhcl.golden"}, + {"list.input", "list.golden"}, + {"list_comment.input", "list_comment.golden"}, + {"comment.input", "comment.golden"}, + {"comment_crlf.input", "comment.golden"}, + {"comment_aligned.input", "comment_aligned.golden"}, + {"comment_array.input", "comment_array.golden"}, + {"comment_end_file.input", "comment_end_file.golden"}, + {"comment_multiline_indent.input", "comment_multiline_indent.golden"}, + {"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"}, + {"comment_multiline_stanza.input", "comment_multiline_stanza.golden"}, + {"comment_newline.input", "comment_newline.golden"}, + {"comment_object_multi.input", "comment_object_multi.golden"}, + {"comment_standalone.input", "comment_standalone.golden"}, + {"empty_block.input", "empty_block.golden"}, + {"list_of_objects.input", "list_of_objects.golden"}, + {"multiline_string.input", "multiline_string.golden"}, + {"object_singleline.input", "object_singleline.golden"}, + {"object_with_heredoc.input", "object_with_heredoc.golden"}, +} + +func TestFiles(t *testing.T) { + for _, e := range data { + source := filepath.Join(dataDir, e.source) + golden := filepath.Join(dataDir, e.golden) + t.Run(e.source, func(t *testing.T) { + check(t, source, golden) + }) + } +} + +func check(t *testing.T, source, golden string) { + src, err := ioutil.ReadFile(source) + if err != nil { + t.Error(err) + return + } + + res, err := format(src) + if err != nil { + t.Error(err) + return + } + + // update golden files if necessary + if *update { + if err := ioutil.WriteFile(golden, res, 0644); err != nil { + t.Error(err) + } + return + } + + // get golden + gld, err := ioutil.ReadFile(golden) + if err != nil { + t.Error(err) + return + } + + // formatted source and golden must be the same + if err := diff(source, golden, res, gld); err != nil { + t.Error(err) + return + } +} + +// diff compares a and b. +func diff(aname, bname string, a, b []byte) error { + var buf bytes.Buffer // holding long error message + + // compare lengths + if len(a) != len(b) { + fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b)) + } + + // compare contents + line := 1 + offs := 1 + for i := 0; i < len(a) && i < len(b); i++ { + ch := a[i] + if ch != b[i] { + fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs)) + fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs)) + fmt.Fprintf(&buf, "\n\n") + break + } + if ch == '\n' { + line++ + offs = i + 1 + } + } + + if buf.Len() > 0 { + return errors.New(buf.String()) + } + return nil +} + +// format parses src, prints the corresponding AST, verifies the resulting +// src is syntactically correct, and returns the resulting src or an error +// if any. +func format(src []byte) ([]byte, error) { + formatted, err := Format(src) + if err != nil { + return nil, err + } + + // make sure formatted output is syntactically correct + if _, err := parser.Parse(formatted); err != nil { + return nil, fmt.Errorf("parse: %s\n%s", err, formatted) + } + + return formatted, nil +} + +// lineAt returns the line in text starting at offset offs. +func lineAt(text []byte, offs int) []byte { + i := offs + for i < len(text) && text[i] != '\n' { + i++ + } + return text[offs:i] +} + +// TestFormatParsable ensures that the output of Format() is can be parsed again. 
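+//
+// The cases below are intentionally malformed or fuzz-like inputs; the test
+// only requires that anything Format accepts can be parsed back by Parse.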
+func TestFormatValidOutput(t *testing.T) { + cases := []string{ + "#\x00", + "#\ue123t", + "x=//\n0y=<<_\n_\n", + "y=[1,//\n]", + "Y=<<4\n4/\n\n\n/4/@=4/\n\n\n/4000000004\r\r\n00004\n", + "x=<<_\n_\r\r\n_\n", + "X=<<-\n\r\r\n", + } + + for _, c := range cases { + f, err := Format([]byte(c)) + if err != nil { + // ignore these failures, not all inputs are valid HCL. + t.Logf("Format(%q) = %v", c, err) + continue + } + + if _, err := parser.Parse(f); err != nil { + t.Errorf("Format(%q) = %q; Parse(%q) = %v", c, f, f, err) + continue + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden new file mode 100644 index 000000000..192c26aac --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden @@ -0,0 +1,39 @@ +// A standalone comment is a comment which is not attached to any kind of node + +// This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = ["fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1, 2] // another line here + +# Another comment +variable = { + description = "bar" # another yooo + + foo { + # Nested standalone + + bar = "fatih" + } +} + +// lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 + +// comment +multiline = "assignment" diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input new file mode 100644 index 000000000..c4b29de7f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input @@ -0,0 +1,39 @@ +// A standalone comment is a comment which is not attached to any kind of node + + // This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = [ "fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1,2] // another line here + + # Another comment +variable = { + description = "bar" # another yooo + foo { + # Nested standalone + + bar = "fatih" + } +} + + // lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 + +multiline = // comment +"assignment" diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden new file mode 100644 index 000000000..6ff21504c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden @@ -0,0 +1,32 @@ +aligned { + # We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + + default = { + bar = "example" + } + + #deneme arslan + fatih = ["fatih"] # yoo4 + + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + + default = { + bar = "example" + } + + security_groups = [ + "foo", # kenya 1 + "${aws_security_group.firewall.foo}", # kenya 2 + ] + + security_groups2 = [ + "foo", # kenya 1 + "bar", # kenya 1.5 + "${aws_security_group.firewall.foo}", # kenya 2 + "foobar", # kenya 3 + ] +} diff --git 
a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input new file mode 100644 index 000000000..bd43ab1ad --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input @@ -0,0 +1,28 @@ +aligned { +# We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + default = { + bar = "example" + } + #deneme arslan + fatih = ["fatih"] # yoo4 + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + default = { + bar = "example" + } + +security_groups = [ + "foo", # kenya 1 + "${aws_security_group.firewall.foo}", # kenya 2 +] + +security_groups2 = [ + "foo", # kenya 1 + "bar", # kenya 1.5 + "${aws_security_group.firewall.foo}", # kenya 2 + "foobar", # kenya 3 +] +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden new file mode 100644 index 000000000..e778eafa3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden @@ -0,0 +1,13 @@ +banana = [ + # I really want to comment this item in the array. + "a", + + # This as well + "b", + + "c", # And C + "d", + + # And another + "e", +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input new file mode 100644 index 000000000..e778eafa3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input @@ -0,0 +1,13 @@ +banana = [ + # I really want to comment this item in the array. + "a", + + # This as well + "b", + + "c", # And C + "d", + + # And another + "e", +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input new file mode 100644 index 000000000..495508644 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input @@ -0,0 +1,39 @@ +// A standalone comment is a comment which is not attached to any kind of node + + // This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = [ "fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1,2] // another line here + + # Another comment +variable = { + description = "bar" # another yooo + foo { + # Nested standalone + + bar = "fatih" + } +} + + // lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 + +multiline = // comment +"assignment" diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden new file mode 100644 index 000000000..dbeae36a8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden @@ -0,0 +1,6 @@ +resource "blah" "blah" {} + +// +// +// + diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input new file mode 100644 index 000000000..68c4c282e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input @@ -0,0 +1,5 @@ +resource "blah" "blah" {} 
+ +// +// +// diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden new file mode 100644 index 000000000..74c4ccd89 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden @@ -0,0 +1,12 @@ +resource "provider" "resource" { + /* + SPACE_SENSITIVE_CODE = < 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. 
If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start && ch != eof { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go new file mode 100644 index 000000000..58d68f5c5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go @@ -0,0 +1,642 @@ +package scanner + +import ( + "bytes" + "fmt" + "testing" + + "strings" + + 
"github.com/hashicorp/hcl/hcl/token" +) + +var f100 = strings.Repeat("f", 100) + +type tokenPair struct { + tok token.Type + text string +} + +var tokenLists = map[string][]tokenPair{ + "comment": []tokenPair{ + {token.COMMENT, "//"}, + {token.COMMENT, "////"}, + {token.COMMENT, "// comment"}, + {token.COMMENT, "// /* comment */"}, + {token.COMMENT, "// // comment //"}, + {token.COMMENT, "//" + f100}, + {token.COMMENT, "#"}, + {token.COMMENT, "##"}, + {token.COMMENT, "# comment"}, + {token.COMMENT, "# /* comment */"}, + {token.COMMENT, "# # comment #"}, + {token.COMMENT, "#" + f100}, + {token.COMMENT, "/**/"}, + {token.COMMENT, "/***/"}, + {token.COMMENT, "/* comment */"}, + {token.COMMENT, "/* // comment */"}, + {token.COMMENT, "/* /* comment */"}, + {token.COMMENT, "/*\n comment\n*/"}, + {token.COMMENT, "/*" + f100 + "*/"}, + }, + "operator": []tokenPair{ + {token.LBRACK, "["}, + {token.LBRACE, "{"}, + {token.COMMA, ","}, + {token.PERIOD, "."}, + {token.RBRACK, "]"}, + {token.RBRACE, "}"}, + {token.ASSIGN, "="}, + {token.ADD, "+"}, + {token.SUB, "-"}, + }, + "bool": []tokenPair{ + {token.BOOL, "true"}, + {token.BOOL, "false"}, + }, + "ident": []tokenPair{ + {token.IDENT, "a"}, + {token.IDENT, "a0"}, + {token.IDENT, "foobar"}, + {token.IDENT, "foo-bar"}, + {token.IDENT, "abc123"}, + {token.IDENT, "LGTM"}, + {token.IDENT, "_"}, + {token.IDENT, "_abc123"}, + {token.IDENT, "abc123_"}, + {token.IDENT, "_abc_123_"}, + {token.IDENT, "_äöü"}, + {token.IDENT, "_本"}, + {token.IDENT, "äöü"}, + {token.IDENT, "本"}, + {token.IDENT, "a۰۱۸"}, + {token.IDENT, "foo६४"}, + {token.IDENT, "bar9876"}, + }, + "heredoc": []tokenPair{ + {token.HEREDOC, "< 0 for %q", s.ErrorCount, src) + } +} + +func testTokenList(t *testing.T, tokenList []tokenPair) { + // create artifical source code + buf := new(bytes.Buffer) + for _, ident := range tokenList { + fmt.Fprintf(buf, "%s\n", ident.text) + } + + s := New(buf.Bytes()) + for _, ident := range tokenList { + tok := s.Scan() + if tok.Type != ident.tok { + t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) + } + + if tok.Text != ident.text { + t.Errorf("text = %q want %q", tok.String(), ident.text) + } + + } +} + +func countNewlines(s string) int { + n := 0 + for _, ch := range s { + if ch == '\n' { + n++ + } + } + return n +} + +func TestScanDigitsUnread(t *testing.T) { + cases := []string{ + "M=0\"\\00", + "M=\"\\00", + "\"\\00", + "M=[\"\\00", + "U{\"\\00", + "\"\n{}#\n\"\\00", + "M=[[\"\\00", + "U{d=0\"\\U00", + "#\n\"\\x00", + "m=[[[\"\\00", + } + + for _, c := range cases { + s := New([]byte(c)) + + for { + tok := s.Scan() + if tok.Type == token.EOF { + break + } + t.Logf("s.Scan() = %s", tok) + } + } +} + +func TestScanHeredocRegexpCompile(t *testing.T) { + cases := []string{ + "0\xe1\n<<ȸ\nhello\nworld\nȸ", + } + + for _, c := range cases { + s := New([]byte(c)) + + for { + tok := s.Scan() + if tok.Type == token.EOF { + break + } + t.Logf("s.Scan() = %s", tok) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 000000000..5f981eaa2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. 
+var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. 
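Unquote above behaves like the standard library's strconv.Unquote for double-quoted strings, except that ${...} interpolation sequences pass through undecoded. A short usage sketch with illustrative inputs:

package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	inputs := []string{
		`"hello\nworld"`,   // ordinary escapes are decoded
		`"${file("foo")}"`, // interpolations pass through verbatim
		`"unterminated`,    // no closing quote: ErrSyntax
	}

	for _, in := range inputs {
		out, err := hclstrconv.Unquote(in)
		fmt.Printf("Unquote(%s) = %q, %v\n", in, out, err)
	}
}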
+func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go new file mode 100644 index 000000000..65be375d9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go @@ -0,0 +1,96 @@ +package strconv + +import "testing" + +type quoteTest struct { + in string + out string + ascii string +} + +var quotetests = []quoteTest{ + {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`}, + {"\\", `"\\"`, `"\\"`}, + {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`}, + {"\u263a", `"☺"`, `"\u263a"`}, + {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`}, + {"\x04", `"\x04"`, `"\x04"`}, +} + +type unQuoteTest struct { + in string + out string +} + +var unquotetests = []unQuoteTest{ + {`""`, ""}, + {`"a"`, "a"}, + {`"abc"`, "abc"}, + {`"☺"`, "☺"}, + {`"hello world"`, "hello world"}, + {`"\xFF"`, "\xFF"}, + {`"\377"`, "\377"}, + {`"\u1234"`, "\u1234"}, + {`"\U00010111"`, "\U00010111"}, + {`"\U0001011111"`, "\U0001011111"}, + {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""}, + {`"'"`, "'"}, + {`"${file("foo")}"`, `${file("foo")}`}, + {`"${file("\"foo\"")}"`, `${file("\"foo\"")}`}, + {`"echo ${var.region}${element(split(",",var.zones),0)}"`, + `echo ${var.region}${element(split(",",var.zones),0)}`}, + {`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`}, + {`"${\n}"`, `${\n}`}, +} + +var misquoted = []string{ + ``, + `"`, + `"a`, + `"'`, + `b"`, + `"\"`, + `"\9"`, + `"\19"`, + `"\129"`, + `'\'`, + `'\9'`, + `'\19'`, + 
`'\129'`, + `'ab'`, + `"\x1!"`, + `"\U12345678"`, + `"\z"`, + "`", + "`xxx", + "`\"", + `"\'"`, + `'\"'`, + "\"\n\"", + "\"\\n\n\"", + "'\n'", + `"${"`, + `"${foo{}"`, + "\"${foo}\n\"", +} + +func TestUnquote(t *testing.T) { + for _, tt := range unquotetests { + if out, err := Unquote(tt.in); err != nil || out != tt.out { + t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out) + } + } + + // run the quote tests too, backward + for _, tt := range quotetests { + if in, err := Unquote(tt.out); in != tt.in { + t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in) + } + } + + for _, s := range misquoted { + if out, err := Unquote(s); out != "" || err != ErrSyntax { + t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl new file mode 100644 index 000000000..78c267582 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl @@ -0,0 +1,4 @@ +foo = [ + "1", + "2", # comment +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl new file mode 100644 index 000000000..eb5a99a69 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl @@ -0,0 +1,6 @@ +resource = [{ + "foo": { + "bar": {}, + "baz": [1, 2, "foo"], + } +}] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl new file mode 100644 index 000000000..1ff7f29fd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl @@ -0,0 +1,15 @@ +// Foo + +/* Bar */ + +/* +/* +Baz +*/ + +# Another + +# Multiple +# Lines + +foo = "bar" diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl new file mode 100644 index 000000000..fec56017d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl @@ -0,0 +1 @@ +# Hello diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl new file mode 100644 index 000000000..cccb5b06f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl @@ -0,0 +1,42 @@ +// This comes from Terraform, as a test +variable "foo" { + default = "bar" + description = "bar" +} + +provider "aws" { + access_key = "foo" + secret_key = "bar" +} + +provider "do" { + api_key = "${var.foo}" +} + +resource "aws_security_group" "firewall" { + count = 5 +} + +resource aws_instance "web" { + ami = "${var.foo}" + security_groups = [ + "foo", + "${aws_security_group.firewall.foo}" + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } +} + +resource "aws_instance" "db" { + security_groups = "${aws_security_group.firewall.*.id}" + VPC = "foo" + + depends_on = ["aws_instance.web"] +} + +output "web_ip" { + value = "${aws_instance.web.private_ip}" +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl new file mode 100644 index 000000000..0007aaf5f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl @@ -0,0 +1 @@ +foo.bar = "baz" diff --git 
a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl new file mode 100644 index 000000000..059d4ce65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl @@ -0,0 +1 @@ +foo = [1, 2, "foo"] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl new file mode 100644 index 000000000..50f4218ac --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl @@ -0,0 +1 @@ +foo = [1, 2, "foo",] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl new file mode 100644 index 000000000..029c54b0c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl @@ -0,0 +1,2 @@ +foo = "bar" +key = 7 diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl new file mode 100644 index 000000000..e9f77cae9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl @@ -0,0 +1,3 @@ +default = { + "eu-west-1": "ami-b1cf19c6", +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl new file mode 100644 index 000000000..92592fbb3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl @@ -0,0 +1,5 @@ +// This is a test structure for the lexer +foo bar "baz" { + key = 7 + foo = "bar" +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl new file mode 100644 index 000000000..7229a1f01 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl @@ -0,0 +1,5 @@ +foo { + value = 7 + "value" = 8 + "complex::value" = 9 +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl new file mode 100644 index 000000000..4d156ddea --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl @@ -0,0 +1 @@ +resource "foo" "bar" {} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl new file mode 100644 index 000000000..cf2747ea1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl @@ -0,0 +1,7 @@ +foo = "bar" +bar = 7 +baz = [1,2,3] +foo = -12 +bar = 3.14159 +foo = true +bar = false diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. 
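Pos, defined above, is attached to every token; its String, Before, and After methods (below) give cheap position reporting and ordering. A small illustration, with made-up positions:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	start := token.Pos{Filename: "config.hcl", Offset: 0, Line: 1, Column: 1}
	end := token.Pos{Filename: "config.hcl", Offset: 42, Line: 3, Column: 7}

	fmt.Println(start)             // config.hcl:1:1
	fmt.Println(start.Before(end)) // true
	fmt.Println(end.After(start))  // true

	// A zero Pos has Line 0, so it is invalid and renders as "-".
	var invalid token.Pos
	fmt.Println(invalid)
}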
+func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 000000000..e37c0664e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 000000000..125a5f072 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. 
If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? 
not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go new file mode 100644 index 000000000..e0cebf50a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go @@ -0,0 +1,384 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +func TestType(t *testing.T) { + var literals = []struct { + typ token.Type + src string + }{ + {token.STRING, `"foo": "bar"`}, + {token.NUMBER, `"foo": 123`}, + {token.FLOAT, `"foo": 123.12`}, + {token.FLOAT, `"foo": -123.12`}, + {token.BOOL, `"foo": true`}, + {token.STRING, `"foo": null`}, + } + + for _, l := range literals { + t.Logf("Testing: %s", l.src) + + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + lit, ok := item.Val.(*ast.LiteralType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + if lit.Token.Type != l.typ { + t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type) + } + } +} + +func TestListType(t *testing.T) { + var literals = []struct { + src string + tokens []token.Type + }{ + { + `"foo": ["123", 123]`, + []token.Type{token.STRING, token.NUMBER}, + }, + { + `"foo": [123, "123",]`, + []token.Type{token.NUMBER, token.STRING}, + }, + { + `"foo": []`, + []token.Type{}, + }, + { + `"foo": ["123", 123]`, + []token.Type{token.STRING, token.NUMBER}, + }, + { + `"foo": ["123", {}]`, + []token.Type{token.STRING, token.LBRACE}, + }, + } + + for _, l := range literals { + t.Logf("Testing: %s", l.src) + + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + list, ok := item.Val.(*ast.ListType) + if !ok { + t.Errorf("node should be of 
type LiteralType, got: %T", item.Val) + } + + tokens := []token.Type{} + for _, li := range list.List { + switch v := li.(type) { + case *ast.LiteralType: + tokens = append(tokens, v.Token.Type) + case *ast.ObjectType: + tokens = append(tokens, token.LBRACE) + } + } + + equals(t, l.tokens, tokens) + } +} + +func TestObjectType(t *testing.T) { + var literals = []struct { + src string + nodeType []ast.Node + itemLen int + }{ + { + `"foo": {}`, + nil, + 0, + }, + { + `"foo": { + "bar": "fatih" + }`, + []ast.Node{&ast.LiteralType{}}, + 1, + }, + { + `"foo": { + "bar": "fatih", + "baz": ["arslan"] + }`, + []ast.Node{ + &ast.LiteralType{}, + &ast.ListType{}, + }, + 2, + }, + { + `"foo": { + "bar": {} + }`, + []ast.Node{ + &ast.ObjectType{}, + }, + 1, + }, + { + `"foo": { + "bar": {}, + "foo": true + }`, + []ast.Node{ + &ast.ObjectType{}, + &ast.LiteralType{}, + }, + 2, + }, + } + + for _, l := range literals { + t.Logf("Testing:\n%s\n", l.src) + + p := newParser([]byte(l.src)) + // p.enableTrace = true + item, err := p.objectItem() + if err != nil { + t.Error(err) + } + + // we know that the ObjectKey name is foo for all cases, what matters + // is the object + obj, ok := item.Val.(*ast.ObjectType) + if !ok { + t.Errorf("node should be of type LiteralType, got: %T", item.Val) + } + + // check if the total length of items are correct + equals(t, l.itemLen, len(obj.List.Items)) + + // check if the types are correct + for i, item := range obj.List.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + } + } +} + +func TestFlattenObjects(t *testing.T) { + var literals = []struct { + src string + nodeType []ast.Node + itemLen int + }{ + { + `{ + "foo": [ + { + "foo": "svh", + "bar": "fatih" + } + ] + }`, + []ast.Node{ + &ast.ObjectType{}, + &ast.LiteralType{}, + &ast.LiteralType{}, + }, + 3, + }, + { + `{ + "variable": { + "foo": {} + } + }`, + []ast.Node{ + &ast.ObjectType{}, + }, + 1, + }, + { + `{ + "empty": [] + }`, + []ast.Node{ + &ast.ListType{}, + }, + 1, + }, + { + `{ + "basic": [1, 2, 3] + }`, + []ast.Node{ + &ast.ListType{}, + }, + 1, + }, + } + + for _, l := range literals { + t.Logf("Testing:\n%s\n", l.src) + + f, err := Parse([]byte(l.src)) + if err != nil { + t.Error(err) + } + + // the first object is always an ObjectList so just assert that one + // so we can use it as such + obj, ok := f.Node.(*ast.ObjectList) + if !ok { + t.Errorf("node should be *ast.ObjectList, got: %T", f.Node) + } + + // check if the types are correct + var i int + for _, item := range obj.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + i++ + + if obj, ok := item.Val.(*ast.ObjectType); ok { + for _, item := range obj.List.Items { + equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) + i++ + } + } + } + + // check if the number of items is correct + equals(t, l.itemLen, i) + + } +} + +func TestObjectKey(t *testing.T) { + keys := []struct { + exp []token.Type + src string + }{ + {[]token.Type{token.STRING}, `"foo": {}`}, + } + + for _, k := range keys { + p := newParser([]byte(k.src)) + keys, err := p.objectKey() + if err != nil { + t.Fatal(err) + } + + tokens := []token.Type{} + for _, o := range keys { + tokens = append(tokens, o.Token.Type) + } + + equals(t, k.exp, tokens) + } + + errKeys := []struct { + src string + }{ + {`foo 12 {}`}, + {`foo bar = {}`}, + {`foo []`}, + {`12 {}`}, + } + + for _, k := range errKeys { + p := newParser([]byte(k.src)) + _, err := p.objectKey() + if err == nil { + t.Errorf("case '%s' should give an error", 
k.src) + } + } +} + +// Official HCL tests +func TestParse(t *testing.T) { + cases := []struct { + Name string + Err bool + }{ + { + "array.json", + false, + }, + { + "basic.json", + false, + }, + { + "object.json", + false, + }, + { + "types.json", + false, + }, + { + "bad_input_128.json", + true, + }, + { + "bad_input_tf_8110.json", + true, + }, + { + "good_input_tf_8110.json", + false, + }, + } + + const fixtureDir = "./test-fixtures" + + for _, tc := range cases { + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name)) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = Parse(d) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.Name, err) + } + } +} + +func TestParse_inline(t *testing.T) { + cases := []struct { + Value string + Err bool + }{ + {"{:{", true}, + } + + for _, tc := range cases { + _, err := Parse([]byte(tc.Value)) + if (err != nil) != tc.Err { + t.Fatalf("Input: %q\n\nError: %s", tc.Value, err) + } + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %s\n\n\tgot: %s\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json new file mode 100644 index 000000000..e320f17ab --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json @@ -0,0 +1,4 @@ +{ + "foo": [1, 2, "bar"], + "bar": "baz" +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_128.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_128.json new file mode 100644 index 000000000..b5f850c96 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_128.json @@ -0,0 +1 @@ +{:{ diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_tf_8110.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_tf_8110.json new file mode 100644 index 000000000..a04385833 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_tf_8110.json @@ -0,0 +1,7 @@ +{ + "variable": { + "poc": { + "default": "${replace("europe-west", "-", " ")}" + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json new file mode 100644 index 000000000..b54bde96c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json @@ -0,0 +1,3 @@ +{ + "foo": "bar" +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/good_input_tf_8110.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/good_input_tf_8110.json new file mode 100644 index 000000000..f21aa090d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/good_input_tf_8110.json @@ -0,0 +1,7 @@ +{ + "variable": { + "poc": { + "default": "${replace(\"europe-west\", \"-\", \" \")}" + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json new file mode 100644 index 000000000..72168a3cc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json @@ -0,0 +1,5 @@ +{ + "foo": { + "bar": [1,2] + } +} diff --git 
a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json new file mode 100644 index 000000000..9a142a6ca --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json @@ -0,0 +1,10 @@ +{ + "foo": "bar", + "bar": 7, + "baz": [1,2,3], + "foo": -12, + "bar": 3.14159, + "foo": true, + "bar": false, + "foo": null +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 000000000..fe3f0f095 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. 
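json/parser.Parse above yields the same *ast.File shape as the native HCL parser, so a JSON-derived AST can be handed to other consumers of the shared AST. The sketch below feeds one to the printer from earlier in this diff; that the printer accepts JSON-derived ASTs in all cases is an assumption here, not something this diff asserts.

package main

import (
	"log"
	"os"

	"github.com/hashicorp/hcl/hcl/printer"
	jsonparser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := []byte(`{"variable": {"region": {"default": "eu-west-1"}}}`)

	// Parse the JSON input into the shared HCL AST...
	f, err := jsonparser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	// ...and render that AST back out as native HCL syntax.
	if err := printer.Fprint(os.Stdout, f.Node); err != nil {
		log.Fatal(err)
	}
}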
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moved the offset by the size of one
+	// rune, but we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune.
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// a literal longer than one digit that starts with zero is an error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune, which is used to determine whether a fraction
+// or an exponent follows.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune.
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
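+// For illustration (the input is assumed, not from the upstream tests), the
+// sign and digits after the exponent marker are folded into one FLOAT token:
+//
+//	s := New([]byte("1e-10"))
+//	tok := s.Scan() // tok.Type == token.FLOAT, tok.Text == "1e-10"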
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string.
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence.
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans up to n digits of the given base. For example, an octal
+// escape such as \123 results in a scanDigits(ch, 8, 3) call.
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non-digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string.
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got the identifier, put back the latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter.
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit.
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is an ASCII decimal digit.
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit.
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return.
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or
+// hexadecimal rune.
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
new file mode 100644
index 000000000..3033a5797
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
@@ -0,0 +1,362 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/hcl/json/token"
+)
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+type tokenPair struct {
+	tok  token.Type
+	text string
+}
+
+var tokenLists = map[string][]tokenPair{
+	"operator": []tokenPair{
+		{token.LBRACK, "["},
+		{token.LBRACE, "{"},
+		{token.COMMA, ","},
+		{token.PERIOD, "."},
+		{token.RBRACK, "]"},
+		{token.RBRACE, "}"},
+	},
+	"bool": []tokenPair{
+		{token.BOOL, "true"},
+		{token.BOOL, "false"},
+	},
+	"string": []tokenPair{
+		{token.STRING, `" "`},
+		{token.STRING, `"a"`},
+		{token.STRING, `"本"`},
+		{token.STRING, `"${file(\"foo\")}"`},
+		{token.STRING, `"\a"`},
+		{token.STRING, `"\b"`},
+		{token.STRING, `"\f"`},
+		{token.STRING, `"\n"`},
+		{token.STRING, `"\r"`},
+		{token.STRING, `"\t"`},
+		{token.STRING, `"\v"`},
+		{token.STRING, `"\""`},
+		{token.STRING, `"\000"`},
+		{token.STRING, `"\777"`},
+		{token.STRING, `"\x00"`},
+		{token.STRING, `"\xff"`},
+		{token.STRING, `"\u0000"`},
+		{token.STRING, `"\ufA16"`},
+		{token.STRING, `"\U00000000"`},
+		{token.STRING, `"\U0000ffAB"`},
+		{token.STRING, `"` + f100 + `"`},
+	},
+	"number": []tokenPair{
+		{token.NUMBER, "0"},
+		{token.NUMBER, "1"},
+		{token.NUMBER, "9"},
+		{token.NUMBER, "42"},
+		{token.NUMBER, "1234567890"},
+		{token.NUMBER, "-0"},
+		{token.NUMBER, "-1"},
+		{token.NUMBER, "-9"},
+		{token.NUMBER, "-42"},
+		{token.NUMBER, "-1234567890"},
+	},
+	"float": []tokenPair{
+		{token.FLOAT, "0."},
+		{token.FLOAT, "1."},
+		{token.FLOAT, "42."},
+		{token.FLOAT, "01234567890."},
+		{token.FLOAT, ".0"},
+		{token.FLOAT, ".1"},
+		{token.FLOAT, ".42"},
+		{token.FLOAT, ".0123456789"},
+		{token.FLOAT, "0.0"},
+
{token.FLOAT, "1.0"}, + {token.FLOAT, "42.0"}, + {token.FLOAT, "01234567890.0"}, + {token.FLOAT, "0e0"}, + {token.FLOAT, "1e0"}, + {token.FLOAT, "42e0"}, + {token.FLOAT, "01234567890e0"}, + {token.FLOAT, "0E0"}, + {token.FLOAT, "1E0"}, + {token.FLOAT, "42E0"}, + {token.FLOAT, "01234567890E0"}, + {token.FLOAT, "0e+10"}, + {token.FLOAT, "1e-10"}, + {token.FLOAT, "42e+10"}, + {token.FLOAT, "01234567890e-10"}, + {token.FLOAT, "0E+10"}, + {token.FLOAT, "1E-10"}, + {token.FLOAT, "42E+10"}, + {token.FLOAT, "01234567890E-10"}, + {token.FLOAT, "01.8e0"}, + {token.FLOAT, "1.4e0"}, + {token.FLOAT, "42.2e0"}, + {token.FLOAT, "01234567890.12e0"}, + {token.FLOAT, "0.E0"}, + {token.FLOAT, "1.12E0"}, + {token.FLOAT, "42.123E0"}, + {token.FLOAT, "01234567890.213E0"}, + {token.FLOAT, "0.2e+10"}, + {token.FLOAT, "1.2e-10"}, + {token.FLOAT, "42.54e+10"}, + {token.FLOAT, "01234567890.98e-10"}, + {token.FLOAT, "0.1E+10"}, + {token.FLOAT, "1.1E-10"}, + {token.FLOAT, "42.1E+10"}, + {token.FLOAT, "01234567890.1E-10"}, + {token.FLOAT, "-0.0"}, + {token.FLOAT, "-1.0"}, + {token.FLOAT, "-42.0"}, + {token.FLOAT, "-01234567890.0"}, + {token.FLOAT, "-0e0"}, + {token.FLOAT, "-1e0"}, + {token.FLOAT, "-42e0"}, + {token.FLOAT, "-01234567890e0"}, + {token.FLOAT, "-0E0"}, + {token.FLOAT, "-1E0"}, + {token.FLOAT, "-42E0"}, + {token.FLOAT, "-01234567890E0"}, + {token.FLOAT, "-0e+10"}, + {token.FLOAT, "-1e-10"}, + {token.FLOAT, "-42e+10"}, + {token.FLOAT, "-01234567890e-10"}, + {token.FLOAT, "-0E+10"}, + {token.FLOAT, "-1E-10"}, + {token.FLOAT, "-42E+10"}, + {token.FLOAT, "-01234567890E-10"}, + {token.FLOAT, "-01.8e0"}, + {token.FLOAT, "-1.4e0"}, + {token.FLOAT, "-42.2e0"}, + {token.FLOAT, "-01234567890.12e0"}, + {token.FLOAT, "-0.E0"}, + {token.FLOAT, "-1.12E0"}, + {token.FLOAT, "-42.123E0"}, + {token.FLOAT, "-01234567890.213E0"}, + {token.FLOAT, "-0.2e+10"}, + {token.FLOAT, "-1.2e-10"}, + {token.FLOAT, "-42.54e+10"}, + {token.FLOAT, "-01234567890.98e-10"}, + {token.FLOAT, "-0.1E+10"}, + {token.FLOAT, "-1.1E-10"}, + {token.FLOAT, "-42.1E+10"}, + {token.FLOAT, "-01234567890.1E-10"}, + }, +} + +var orderedTokenLists = []string{ + "comment", + "operator", + "bool", + "string", + "number", + "float", +} + +func TestPosition(t *testing.T) { + // create artifical source code + buf := new(bytes.Buffer) + + for _, listName := range orderedTokenLists { + for _, ident := range tokenLists[listName] { + fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text) + } + } + + s := New(buf.Bytes()) + + pos := token.Pos{"", 4, 1, 5} + s.Scan() + for _, listName := range orderedTokenLists { + + for _, k := range tokenLists[listName] { + curPos := s.tokPos + // fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column) + + if curPos.Offset != pos.Offset { + t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text) + } + if curPos.Line != pos.Line { + t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text) + } + if curPos.Column != pos.Column { + t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text) + } + pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline + pos.Line += countNewlines(k.text) + 1 // each token is on a new line + + s.Error = func(pos token.Pos, msg string) { + t.Errorf("error %q for %q", msg, k.text) + } + + s.Scan() + } + } + // make sure there were no token-internal errors reported by scanner + if s.ErrorCount != 0 { + t.Errorf("%d errors", s.ErrorCount) + } +} + +func TestComment(t *testing.T) { + testTokenList(t, tokenLists["comment"]) +} + +func TestOperator(t 
*testing.T) { + testTokenList(t, tokenLists["operator"]) +} + +func TestBool(t *testing.T) { + testTokenList(t, tokenLists["bool"]) +} + +func TestIdent(t *testing.T) { + testTokenList(t, tokenLists["ident"]) +} + +func TestString(t *testing.T) { + testTokenList(t, tokenLists["string"]) +} + +func TestNumber(t *testing.T) { + testTokenList(t, tokenLists["number"]) +} + +func TestFloat(t *testing.T) { + testTokenList(t, tokenLists["float"]) +} + +func TestRealExample(t *testing.T) { + complexReal := ` +{ + "variable": { + "foo": { + "default": "bar", + "description": "bar", + "depends_on": ["something"] + } + } +}` + + literals := []struct { + tokenType token.Type + literal string + }{ + {token.LBRACE, `{`}, + {token.STRING, `"variable"`}, + {token.COLON, `:`}, + {token.LBRACE, `{`}, + {token.STRING, `"foo"`}, + {token.COLON, `:`}, + {token.LBRACE, `{`}, + {token.STRING, `"default"`}, + {token.COLON, `:`}, + {token.STRING, `"bar"`}, + {token.COMMA, `,`}, + {token.STRING, `"description"`}, + {token.COLON, `:`}, + {token.STRING, `"bar"`}, + {token.COMMA, `,`}, + {token.STRING, `"depends_on"`}, + {token.COLON, `:`}, + {token.LBRACK, `[`}, + {token.STRING, `"something"`}, + {token.RBRACK, `]`}, + {token.RBRACE, `}`}, + {token.RBRACE, `}`}, + {token.RBRACE, `}`}, + {token.EOF, ``}, + } + + s := New([]byte(complexReal)) + for _, l := range literals { + tok := s.Scan() + if l.tokenType != tok.Type { + t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String()) + } + + if l.literal != tok.Text { + t.Errorf("got: %s want %s\n", tok, l.literal) + } + } + +} + +func TestError(t *testing.T) { + testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) + testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) + + testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING) + testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING) + + testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER) + testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER) + testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL) + + testError(t, `"`, "1:2", "literal not terminated", token.STRING) + testError(t, `"abc`, "1:5", "literal not terminated", token.STRING) + testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING) +} + +func testError(t *testing.T, src, pos, msg string, tok token.Type) { + s := New([]byte(src)) + + errorCalled := false + s.Error = func(p token.Pos, m string) { + if !errorCalled { + if pos != p.String() { + t.Errorf("pos = %q, want %q for %q", p, pos, src) + } + + if m != msg { + t.Errorf("msg = %q, want %q for %q", m, msg, src) + } + errorCalled = true + } + } + + tk := s.Scan() + if tk.Type != tok { + t.Errorf("tok = %s, want %s for %q", tk, tok, src) + } + if !errorCalled { + t.Errorf("error handler not called for %q", src) + } + if s.ErrorCount == 0 { + t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src) + } +} + +func testTokenList(t *testing.T, tokenList []tokenPair) { + // create artifical source code + buf := new(bytes.Buffer) + for _, ident := range tokenList { + fmt.Fprintf(buf, "%s\n", ident.text) + } + + s := New(buf.Bytes()) + for _, ident := range tokenList { + tok := s.Scan() + if tok.Type != ident.tok { + t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) + } + + if tok.Text != ident.text { + t.Errorf("text = %q want %q", tok.String(), ident.text) + } + + } +} + +func countNewlines(s string) int { + n := 0 + for _, ch := range s { + if ch == 
'\n' { + n++ + } + } + return n +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json new file mode 100644 index 000000000..e320f17ab --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json @@ -0,0 +1,4 @@ +{ + "foo": [1, 2, "bar"], + "bar": "baz" +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json new file mode 100644 index 000000000..b54bde96c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json @@ -0,0 +1,3 @@ +{ + "foo": "bar" +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json new file mode 100644 index 000000000..72168a3cc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json @@ -0,0 +1,5 @@ +{ + "foo": { + "bar": [1,2] + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json new file mode 100644 index 000000000..9a142a6ca --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json @@ -0,0 +1,10 @@ +{ + "foo": "bar", + "bar": 7, + "baz": [1,2,3], + "foo": -12, + "bar": 3.14159, + "foo": true, + "bar": false, + "foo": null +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
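+//
+// An illustrative comparison (the values are assumed, not from the upstream
+// tests):
+//
+//	a := Pos{Line: 1, Column: 2, Offset: 5}
+//	b := Pos{Line: 2, Column: 1, Offset: 9}
+//	a.Before(b) // true: b has a larger offset and line
+//	b.After(a)  // true: a has a smaller offset and line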
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 000000000..95a0c3eee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
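+//
+// A sketch of the conversion (the token value is assumed): a JSON string
+// token carries its quoted text across and is flagged as JSON:
+//
+//	t := Token{Type: STRING, Text: `"bar"`}
+//	h := t.HCLToken() // hcltoken.Token{Type: hcltoken.STRING, Text: `"bar"`, JSON: true}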
+func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token_test.go b/vendor/github.com/hashicorp/hcl/json/token/token_test.go new file mode 100644 index 000000000..a83fdd55b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token_test.go @@ -0,0 +1,34 @@ +package token + +import ( + "testing" +) + +func TestTypeString(t *testing.T) { + var tokens = []struct { + tt Type + str string + }{ + {ILLEGAL, "ILLEGAL"}, + {EOF, "EOF"}, + {NUMBER, "NUMBER"}, + {FLOAT, "FLOAT"}, + {BOOL, "BOOL"}, + {STRING, "STRING"}, + {NULL, "NULL"}, + {LBRACK, "LBRACK"}, + {LBRACE, "LBRACE"}, + {COMMA, "COMMA"}, + {PERIOD, "PERIOD"}, + {RBRACK, "RBRACK"}, + {RBRACE, "RBRACE"}, + } + + for _, token := range tokens { + if token.tt.String() != token.str { + t.Errorf("want: %q got:%q\n", token.str, token.tt) + + } + } + +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 000000000..d9993c292 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. +func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/lex_test.go b/vendor/github.com/hashicorp/hcl/lex_test.go new file mode 100644 index 000000000..806276444 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex_test.go @@ -0,0 +1,37 @@ +package hcl + +import ( + "testing" +) + +func TestLexMode(t *testing.T) { + cases := []struct { + Input string + Mode lexModeValue + }{ + { + "", + lexModeHcl, + }, + { + "foo", + lexModeHcl, + }, + { + "{}", + lexModeJson, + }, + { + " {}", + lexModeJson, + }, + } + + for i, tc := range cases { + actual := lexMode([]byte(tc.Input)) + + if actual != tc.Mode { + t.Fatalf("%d: %#v", i, actual) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 000000000..1fca53c4c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. 
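+//
+// For example (inputs assumed), the first non-space rune decides which
+// parser runs, per lexMode above:
+//
+//	f, err := ParseString(`{"foo": "bar"}`) // '{' => JSON parser
+//	f, err = ParseString(`foo = "bar"`)     // otherwise => HCL parser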
+func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl new file mode 100644 index 000000000..dd3151cb7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl @@ -0,0 +1,5 @@ +resource = [{ + foo = [{ + bar = {} + }] +}] diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl new file mode 100644 index 000000000..949994487 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl @@ -0,0 +1,2 @@ +foo = "bar" +bar = "${file("bing/bong.txt")}" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json new file mode 100644 index 000000000..7bdddc84b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json @@ -0,0 +1,4 @@ +{ + "foo": "bar", + "bar": "${file(\"bing/bong.txt\")}" +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl new file mode 100644 index 000000000..4e415da20 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl @@ -0,0 +1 @@ +count = "3" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl new file mode 100644 index 000000000..363697b49 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl @@ -0,0 +1,3 @@ +foo="bar" +bar="${file("bing/bong.txt")}" +foo-bar="baz" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl new file mode 100644 index 000000000..ee8b06fe3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl @@ -0,0 +1,2 @@ +environment = "aws" { +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl new file mode 100644 index 000000000..5b185cc91 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl @@ -0,0 +1,15 @@ +key "" { + policy = "read" +} + +key "foo/" { + policy = "write" +} + +key "foo/bar/" { + policy = "read" +} + +key "foo/bar/baz" { + policy = "deny" +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json new file mode 100644 index 000000000..151864ee8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json @@ -0,0 +1,19 @@ +{ + "key": { + "": { + "policy": "read" + }, + + "foo/": { + "policy": "write" + }, + + "foo/bar/": { + "policy": "read" + }, + + "foo/bar/baz": { + "policy": "deny" + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl new file mode 100644 index 
000000000..52dcaa1bc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl @@ -0,0 +1,10 @@ +variable "foo" { + default = "bar" + description = "bar" +} + +variable "amis" { + default = { + east = "foo" + } +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json new file mode 100644 index 000000000..49f921ed0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json @@ -0,0 +1,14 @@ +{ + "variable": { + "foo": { + "default": "bar", + "description": "bar" + }, + + "amis": { + "default": { + "east": "foo" + } + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl new file mode 100644 index 000000000..5be1b2315 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl @@ -0,0 +1 @@ +resource "foo" {} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl new file mode 100644 index 000000000..f818b15e0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl @@ -0,0 +1,6 @@ +foo = "bar\"baz\\n" +bar = "new\nline" +qux = "back\\slash" +qax = "slash\\:colon" +nested = "${HH\\:mm\\:ss}" +nestedquotes = "${"\"stringwrappedinquotes\""}" diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/escape_backslash.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/escape_backslash.hcl new file mode 100644 index 000000000..bc337fb7c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/escape_backslash.hcl @@ -0,0 +1,5 @@ +output { + one = "${replace(var.sub_domain, ".", "\\.")}" + two = "${replace(var.sub_domain, ".", "\\\\.")}" + many = "${replace(var.sub_domain, ".", "\\\\\\\\.")}" +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl new file mode 100644 index 000000000..9bca551f8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl @@ -0,0 +1,2 @@ +foo = "bar" +Key = 7 diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl new file mode 100644 index 000000000..edf355e38 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl @@ -0,0 +1,2 @@ +a = 1.02 +b = 2 diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json new file mode 100644 index 000000000..580868043 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json @@ -0,0 +1,4 @@ +{ + "a": 1.02, + "b": 2 +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/git_crypt.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/git_crypt.hcl new file mode 100644 index 000000000..f691948e1 Binary files /dev/null and b/vendor/github.com/hashicorp/hcl/test-fixtures/git_crypt.hcl differ diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json new file mode 100644 index 000000000..cad015198 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json @@ -0,0 +1,3 @@ +{ + "default": "${replace(\"europe-west\", \"-\", \" \")}" +} diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_lists.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_lists.hcl new file mode 100644 index 
000000000..8af345849 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_lists.hcl @@ -0,0 +1,2 @@ +foo = [["foo"], ["bar"]] + diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl new file mode 100644 index 000000000..985a33bae --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl @@ -0,0 +1,4 @@ +foo = [ + {somekey1 = "someval1"}, + {somekey2 = "someval2", someextrakey = "someextraval"}, +] diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl new file mode 100644 index 000000000..f883bd707 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl @@ -0,0 +1,4 @@ +foo = < 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemEOF + itemKey // a key + itemValue // a value + itemComment // a comment +) + +// defines a constant for EOF +const eof = -1 + +// permitted whitespace characters space, FF and TAB +const whitespace = " \f\t" + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. +type lexer struct { + input string // the string being scanned + state stateFn // the next lexing function to enter + pos int // current position in the input + start int // start position of this item + width int // width of last rune read from input + lastPos int // position of most recent item returned by nextItem + runes []rune // scanned runes for this item + items chan item // channel of scanned items +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = w + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + i := item{t, l.start, string(l.runes)} + l.items <- i + l.start = l.pos + l.runes = l.runes[:0] +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// appends the rune to the current value +func (l *lexer) appendRune(r rune) { + l.runes = append(l.runes, r) +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + } + l.backup() +} + +// acceptRunUntil consumes a run of runes up to a terminator. +func (l *lexer) acceptRunUntil(term rune) { + for term != l.next() { + } + l.backup() +} + +// hasText returns true if the current parsed text is not empty. +func (l *lexer) isNotEmpty() bool { + return l.pos > l.start +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. 
Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + i := <-l.items + l.lastPos = i.pos + return i +} + +// lex creates a new scanner for the input string. +func lex(input string) *lexer { + l := &lexer{ + input: input, + items: make(chan item), + runes: make([]rune, 0, 32), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexBeforeKey(l); l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +// lexBeforeKey scans until a key begins. +func lexBeforeKey(l *lexer) stateFn { + switch r := l.next(); { + case isEOF(r): + l.emit(itemEOF) + return nil + + case isEOL(r): + l.ignore() + return lexBeforeKey + + case isComment(r): + return lexComment + + case isWhitespace(r): + l.ignore() + return lexBeforeKey + + default: + l.backup() + return lexKey + } +} + +// lexComment scans a comment line. The comment character has already been scanned. +func lexComment(l *lexer) stateFn { + l.acceptRun(whitespace) + l.ignore() + for { + switch r := l.next(); { + case isEOF(r): + l.ignore() + l.emit(itemEOF) + return nil + case isEOL(r): + l.emit(itemComment) + return lexBeforeKey + default: + l.appendRune(r) + } + } +} + +// lexKey scans the key up to a delimiter +func lexKey(l *lexer) stateFn { + var r rune + +Loop: + for { + switch r = l.next(); { + + case isEscape(r): + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + + case isEndOfKey(r): + l.backup() + break Loop + + case isEOF(r): + break Loop + + default: + l.appendRune(r) + } + } + + if len(l.runes) > 0 { + l.emit(itemKey) + } + + if isEOF(r) { + l.emit(itemEOF) + return nil + } + + return lexBeforeValue +} + +// lexBeforeValue scans the delimiter between key and value. +// Leading and trailing whitespace is ignored. +// We expect to be just after the key. +func lexBeforeValue(l *lexer) stateFn { + l.acceptRun(whitespace) + l.accept(":=") + l.acceptRun(whitespace) + l.ignore() + return lexValue +} + +// lexValue scans text until the end of the line. We expect to be just after the delimiter. +func lexValue(l *lexer) stateFn { + for { + switch r := l.next(); { + case isEscape(r): + if isEOL(l.peek()) { + l.next() + l.acceptRun(whitespace) + } else { + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + } + + case isEOL(r): + l.emit(itemValue) + l.ignore() + return lexBeforeKey + + case isEOF(r): + l.emit(itemValue) + l.emit(itemEOF) + return nil + + default: + l.appendRune(r) + } + } +} + +// scanEscapeSequence scans either one of the escaped characters +// or a unicode literal. We expect to be after the escape character. 
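+//
+// A hypothetical end-to-end example using this package's loader (the
+// property line is assumed): the two-character sequence '\' 't' in the
+// input is decoded into a single tab rune.
+//
+//	p := MustLoadString("key = a\\tb")
+//	v, _ := p.Get("key") // v == "a\tb"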
+func (l *lexer) scanEscapeSequence() error { + switch r := l.next(); { + + case isEscapedCharacter(r): + l.appendRune(decodeEscapedCharacter(r)) + return nil + + case atUnicodeLiteral(r): + return l.scanUnicodeLiteral() + + case isEOF(r): + return fmt.Errorf("premature EOF") + + // silently drop the escape character and append the rune as is + default: + l.appendRune(r) + return nil + } +} + +// scans a unicode literal in the form \uXXXX. We expect to be after the \u. +func (l *lexer) scanUnicodeLiteral() error { + // scan the digits + d := make([]rune, 4) + for i := 0; i < 4; i++ { + d[i] = l.next() + if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { + return fmt.Errorf("invalid unicode literal") + } + } + + // decode the digits into a rune + r, err := strconv.ParseInt(string(d), 16, 0) + if err != nil { + return err + } + + l.appendRune(rune(r)) + return nil +} + +// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. +func decodeEscapedCharacter(r rune) rune { + switch r { + case 'f': + return '\f' + case 'n': + return '\n' + case 'r': + return '\r' + case 't': + return '\t' + default: + return r + } +} + +// atUnicodeLiteral reports whether we are at a unicode literal. +// The escape character has already been consumed. +func atUnicodeLiteral(r rune) bool { + return r == 'u' +} + +// isComment reports whether we are at the start of a comment. +func isComment(r rune) bool { + return r == '#' || r == '!' +} + +// isEndOfKey reports whether the rune terminates the current key. +func isEndOfKey(r rune) bool { + return strings.ContainsRune(" \f\t\r\n:=", r) +} + +// isEOF reports whether we are at EOF. +func isEOF(r rune) bool { + return r == eof +} + +// isEOL reports whether we are at a new line character. +func isEOL(r rune) bool { + return r == '\n' || r == '\r' +} + +// isEscape reports whether the rune is the escape character which +// prefixes unicode literals and other escaped characters. +func isEscape(r rune) bool { + return r == '\\' +} + +// isEscapedCharacter reports whether we are at one of the characters that need escaping. +// The escape character has already been consumed. +func isEscapedCharacter(r rune) bool { + return strings.ContainsRune(" :=fnrt", r) +} + +// isWhitespace reports whether the rune is a whitespace character. +func isWhitespace(r rune) bool { + return strings.ContainsRune(whitespace, r) +} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go new file mode 100644 index 000000000..c8e1b5804 --- /dev/null +++ b/vendor/github.com/magiconair/properties/load.go @@ -0,0 +1,292 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// Encoding specifies encoding of the input data. +type Encoding uint + +const ( + // utf8Default is a private placeholder for the zero value of Encoding to + // ensure that it has the correct meaning. UTF8 is the default encoding but + // was assigned a non-zero value which cannot be changed without breaking + // existing code. Clients should continue to use the public constants. + utf8Default Encoding = iota + + // UTF8 interprets the input data as UTF-8. + UTF8 + + // ISO_8859_1 interprets the input data as ISO-8859-1. 
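+	// Because the first 256 Unicode code points coincide with ISO-8859-1,
+	// decoding is a direct byte-to-rune mapping (see convert below). An
+	// illustrative call (the bytes spell "key=ä" in Latin-1):
+	//
+	//	p, _ := Load([]byte{'k', 'e', 'y', '=', 0xe4}, ISO_8859_1)
+	//	v, _ := p.Get("key") // v == "ä"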
+ ISO_8859_1 +) + +type Loader struct { + // Encoding determines how the data from files and byte buffers + // is interpreted. For URLs the Content-Type header is used + // to determine the encoding of the data. + Encoding Encoding + + // DisableExpansion configures the property expansion of the + // returned property object. When set to true, the property values + // will not be expanded and the Property object will not be checked + // for invalid expansion expressions. + DisableExpansion bool + + // IgnoreMissing configures whether missing files or URLs which return + // 404 are reported as errors. When set to true, missing files and 404 + // status codes are not reported as errors. + IgnoreMissing bool +} + +// Load reads a buffer into a Properties struct. +func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { + return l.loadBytes(buf, l.Encoding) +} + +// LoadAll reads the content of multiple URLs or files in the given order into +// a Properties struct. If IgnoreMissing is true then a 404 status code or +// missing file will not be reported as error. Encoding sets the encoding for +// files. For the URLs see LoadURL for the Content-Type header and the +// encoding. +func (l *Loader) LoadAll(names []string) (*Properties, error) { + all := NewProperties() + for _, name := range names { + n, err := expandName(name) + if err != nil { + return nil, err + } + + var p *Properties + switch { + case strings.HasPrefix(n, "http://"): + p, err = l.LoadURL(n) + case strings.HasPrefix(n, "https://"): + p, err = l.LoadURL(n) + default: + p, err = l.LoadFile(n) + } + if err != nil { + return nil, err + } + all.Merge(p) + } + + all.DisableExpansion = l.DisableExpansion + if all.DisableExpansion { + return all, nil + } + return all, all.check() +} + +// LoadFile reads a file into a Properties struct. +// If IgnoreMissing is true then a missing file will not be +// reported as error. +func (l *Loader) LoadFile(filename string) (*Properties, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if l.IgnoreMissing && os.IsNotExist(err) { + LogPrintf("properties: %s not found. skipping", filename) + return NewProperties(), nil + } + return nil, err + } + return l.loadBytes(data, l.Encoding) +} + +// LoadURL reads the content of the URL into a Properties struct. +// +// The encoding is determined via the Content-Type header which +// should be set to 'text/plain'. If the 'charset' parameter is +// missing, 'iso-8859-1' or 'latin1' the encoding is set to +// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the +// encoding is set to UTF-8. A missing content type header is +// interpreted as 'text/plain; charset=utf-8'. +func (l *Loader) LoadURL(url string) (*Properties, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) + } + + if resp.StatusCode == 404 && l.IgnoreMissing { + LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) + return NewProperties(), nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("properties: %s error reading response. 
%s", url, err) + } + defer resp.Body.Close() + + ct := resp.Header.Get("Content-Type") + var enc Encoding + switch strings.ToLower(ct) { + case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": + enc = ISO_8859_1 + case "", "text/plain; charset=utf-8": + enc = UTF8 + default: + return nil, fmt.Errorf("properties: invalid content type %s", ct) + } + + return l.loadBytes(body, enc) +} + +func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { + p, err := parse(convert(buf, enc)) + if err != nil { + return nil, err + } + p.DisableExpansion = l.DisableExpansion + if p.DisableExpansion { + return p, nil + } + return p, p.check() +} + +// Load reads a buffer into a Properties struct. +func Load(buf []byte, enc Encoding) (*Properties, error) { + l := &Loader{Encoding: enc} + return l.LoadBytes(buf) +} + +// LoadString reads an UTF8 string into a properties struct. +func LoadString(s string) (*Properties, error) { + l := &Loader{Encoding: UTF8} + return l.LoadBytes([]byte(s)) +} + +// LoadMap creates a new Properties struct from a string map. +func LoadMap(m map[string]string) *Properties { + p := NewProperties() + for k, v := range m { + p.Set(k, v) + } + return p +} + +// LoadFile reads a file into a Properties struct. +func LoadFile(filename string, enc Encoding) (*Properties, error) { + l := &Loader{Encoding: enc} + return l.LoadAll([]string{filename}) +} + +// LoadFiles reads multiple files in the given order into +// a Properties struct. If 'ignoreMissing' is true then +// non-existent files will not be reported as error. +func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(filenames) +} + +// LoadURL reads the content of the URL into a Properties struct. +// See Loader#LoadURL for details. +func LoadURL(url string) (*Properties, error) { + l := &Loader{Encoding: UTF8} + return l.LoadAll([]string{url}) +} + +// LoadURLs reads the content of multiple URLs in the given order into a +// Properties struct. If IgnoreMissing is true then a 404 status code will +// not be reported as error. See Loader#LoadURL for the Content-Type header +// and the encoding. +func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} + return l.LoadAll(urls) +} + +// LoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. +func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(names) +} + +// MustLoadString reads an UTF8 string into a Properties struct and +// panics on error. +func MustLoadString(s string) *Properties { + return must(LoadString(s)) +} + +// MustLoadFile reads a file into a Properties struct and +// panics on error. +func MustLoadFile(filename string, enc Encoding) *Properties { + return must(LoadFile(filename, enc)) +} + +// MustLoadFiles reads multiple files in the given order into +// a Properties struct and panics on error. If 'ignoreMissing' +// is true then non-existent files will not be reported as error. 
+func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadFiles(filenames, enc, ignoreMissing)) +} + +// MustLoadURL reads the content of a URL into a Properties struct and +// panics on error. +func MustLoadURL(url string) *Properties { + return must(LoadURL(url)) +} + +// MustLoadURLs reads the content of multiple URLs in the given order into a +// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 +// status code will not be reported as error. +func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { + return must(LoadURLs(urls, ignoreMissing)) +} + +// MustLoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. It panics on error. +func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadAll(names, enc, ignoreMissing)) +} + +func must(p *Properties, err error) *Properties { + if err != nil { + ErrorHandler(err) + } + return p +} + +// expandName expands ${ENV_VAR} expressions in a name. +// If the environment variable does not exist then it will be replaced +// with an empty string. Malformed expressions like "${ENV_VAR" will +// be reported as error. +func expandName(name string) (string, error) { + return expand(name, []string{}, "${", "}", make(map[string]string)) +} + +// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. +// For ISO-8859-1 we can convert each byte straight into a rune since the +// first 256 unicode code points cover ISO-8859-1. +func convert(buf []byte, enc Encoding) string { + switch enc { + case utf8Default, UTF8: + return string(buf) + case ISO_8859_1: + runes := make([]rune, len(buf)) + for i, b := range buf { + runes[i] = rune(b) + } + return string(runes) + default: + ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) + } + panic("ErrorHandler should exit") +} diff --git a/vendor/github.com/magiconair/properties/load_test.go b/vendor/github.com/magiconair/properties/load_test.go new file mode 100644 index 000000000..db28b9939 --- /dev/null +++ b/vendor/github.com/magiconair/properties/load_test.go @@ -0,0 +1,243 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package properties + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "github.com/magiconair/properties/assert" +) + +func TestEncoding(t *testing.T) { + if got, want := utf8Default, Encoding(0); got != want { + t.Fatalf("got encoding %d want %d", got, want) + } + if got, want := UTF8, Encoding(1); got != want { + t.Fatalf("got encoding %d want %d", got, want) + } + if got, want := ISO_8859_1, Encoding(2); got != want { + t.Fatalf("got encoding %d want %d", got, want) + } +} + +func TestLoadFailsWithNotExistingFile(t *testing.T) { + _, err := LoadFile("doesnotexist.properties", ISO_8859_1) + assert.Equal(t, err != nil, true, "") + assert.Matches(t, err.Error(), "open.*no such file or directory") +} + +func TestLoadFilesFailsOnNotExistingFile(t *testing.T) { + _, err := LoadFile("doesnotexist.properties", ISO_8859_1) + assert.Equal(t, err != nil, true, "") + assert.Matches(t, err.Error(), "open.*no such file or directory") +} + +func TestLoadFilesDoesNotFailOnNotExistingFileAndIgnoreMissing(t *testing.T) { + p, err := LoadFiles([]string{"doesnotexist.properties"}, ISO_8859_1, true) + assert.Equal(t, err, nil) + assert.Equal(t, p.Len(), 0) +} + +func TestLoadString(t *testing.T) { + x := "key=äüö" + p1 := MustLoadString(x) + p2 := must(Load([]byte(x), UTF8)) + assert.Equal(t, p1, p2) +} + +func TestLoadMap(t *testing.T) { + // LoadMap does not guarantee the same import order + // of keys every time since map access is randomized. + // Therefore, we need to compare the generated maps. + m := map[string]string{"key": "value", "abc": "def"} + assert.Equal(t, LoadMap(m).Map(), m) +} + +func TestLoadFile(t *testing.T) { + tf := make(tempFiles, 0) + defer tf.removeAll() + + filename := tf.makeFile("key=value") + p := MustLoadFile(filename, ISO_8859_1) + + assert.Equal(t, p.Len(), 1) + assertKeyValues(t, "", p, "key", "value") +} + +func TestLoadFiles(t *testing.T) { + tf := make(tempFiles, 0) + defer tf.removeAll() + + filename := tf.makeFile("key=value") + filename2 := tf.makeFile("key2=value2") + p := MustLoadFiles([]string{filename, filename2}, ISO_8859_1, false) + assertKeyValues(t, "", p, "key", "value", "key2", "value2") +} + +func TestLoadExpandedFile(t *testing.T) { + tf := make(tempFiles, 0) + defer tf.removeAll() + + if err := os.Setenv("_VARX", "some-value"); err != nil { + t.Fatal(err) + } + filename := tf.makeFilePrefix(os.Getenv("_VARX"), "key=value") + filename = strings.Replace(filename, os.Getenv("_VARX"), "${_VARX}", -1) + p := MustLoadFile(filename, ISO_8859_1) + assertKeyValues(t, "", p, "key", "value") +} + +func TestLoadFilesAndIgnoreMissing(t *testing.T) { + tf := make(tempFiles, 0) + defer tf.removeAll() + + filename := tf.makeFile("key=value") + filename2 := tf.makeFile("key2=value2") + p := MustLoadFiles([]string{filename, filename + "foo", filename2, filename2 + "foo"}, ISO_8859_1, true) + assertKeyValues(t, "", p, "key", "value", "key2", "value2") +} + +func TestLoadURL(t *testing.T) { + srv := testServer() + defer srv.Close() + p := MustLoadURL(srv.URL + "/a") + assertKeyValues(t, "", p, "key", "value") +} + +func TestLoadURLs(t *testing.T) { + srv := testServer() + defer srv.Close() + p := MustLoadURLs([]string{srv.URL + "/a", srv.URL + "/b"}, false) + assertKeyValues(t, "", p, "key", "value", "key2", "value2") +} + +func TestLoadURLsAndFailMissing(t *testing.T) { + srv := testServer() + defer srv.Close() + p, err := LoadURLs([]string{srv.URL + "/a", srv.URL + "/c"}, false) + assert.Equal(t, p, 
(*Properties)(nil)) + assert.Matches(t, err.Error(), ".*returned 404.*") +} + +func TestLoadURLsAndIgnoreMissing(t *testing.T) { + srv := testServer() + defer srv.Close() + p := MustLoadURLs([]string{srv.URL + "/a", srv.URL + "/b", srv.URL + "/c"}, true) + assertKeyValues(t, "", p, "key", "value", "key2", "value2") +} + +func TestLoadURLEncoding(t *testing.T) { + srv := testServer() + defer srv.Close() + + uris := []string{"/none", "/utf8", "/plain", "/latin1", "/iso88591"} + for i, uri := range uris { + p := MustLoadURL(srv.URL + uri) + assert.Equal(t, p.GetString("key", ""), "äöü", fmt.Sprintf("%d", i)) + } +} + +func TestLoadURLFailInvalidEncoding(t *testing.T) { + srv := testServer() + defer srv.Close() + + p, err := LoadURL(srv.URL + "/json") + assert.Equal(t, p, (*Properties)(nil)) + assert.Matches(t, err.Error(), ".*invalid content type.*") +} + +func TestLoadAll(t *testing.T) { + tf := make(tempFiles, 0) + defer tf.removeAll() + + filename := tf.makeFile("key=value") + filename2 := tf.makeFile("key2=value3") + filename3 := tf.makeFile("key=value4") + srv := testServer() + defer srv.Close() + p := MustLoadAll([]string{filename, filename2, srv.URL + "/a", srv.URL + "/b", filename3}, UTF8, false) + assertKeyValues(t, "", p, "key", "value4", "key2", "value2") +} + +type tempFiles []string + +func (tf *tempFiles) removeAll() { + for _, path := range *tf { + err := os.Remove(path) + if err != nil { + fmt.Printf("os.Remove: %v", err) + } + } +} + +func (tf *tempFiles) makeFile(data string) string { + return tf.makeFilePrefix("properties", data) +} + +func (tf *tempFiles) makeFilePrefix(prefix, data string) string { + f, err := ioutil.TempFile("", prefix) + if err != nil { + panic("ioutil.TempFile: " + err.Error()) + } + + // remember the temp file so that we can remove it later + *tf = append(*tf, f.Name()) + + n, err := fmt.Fprint(f, data) + if err != nil { + panic("fmt.Fprintln: " + err.Error()) + } + if n != len(data) { + panic(fmt.Sprintf("Data size mismatch. expected=%d wrote=%d\n", len(data), n)) + } + + err = f.Close() + if err != nil { + panic("f.Close: " + err.Error()) + } + + return f.Name() +} + +func testServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + send := func(data []byte, contentType string) { + w.Header().Set("Content-Type", contentType) + if _, err := w.Write(data); err != nil { + panic(err) + } + } + + utf8 := []byte("key=äöü") + iso88591 := []byte{0x6b, 0x65, 0x79, 0x3d, 0xe4, 0xf6, 0xfc} // key=äöü + + switch r.RequestURI { + case "/a": + send([]byte("key=value"), "") + case "/b": + send([]byte("key2=value2"), "") + case "/none": + send(utf8, "") + case "/utf8": + send(utf8, "text/plain; charset=utf-8") + case "/json": + send(utf8, "application/json; charset=utf-8") + case "/plain": + send(iso88591, "text/plain") + case "/latin1": + send(iso88591, "text/plain; charset=latin1") + case "/iso88591": + send(iso88591, "text/plain; charset=iso-8859-1") + default: + w.WriteHeader(404) + } + })) +} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go new file mode 100644 index 000000000..cdc4a8034 --- /dev/null +++ b/vendor/github.com/magiconair/properties/parser.go @@ -0,0 +1,95 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package properties + +import ( + "fmt" + "runtime" +) + +type parser struct { + lex *lexer +} + +func parse(input string) (properties *Properties, err error) { + p := &parser{lex: lex(input)} + defer p.recover(&err) + + properties = NewProperties() + key := "" + comments := []string{} + + for { + token := p.expectOneOf(itemComment, itemKey, itemEOF) + switch token.typ { + case itemEOF: + goto done + case itemComment: + comments = append(comments, token.val) + continue + case itemKey: + key = token.val + if _, ok := properties.m[key]; !ok { + properties.k = append(properties.k, key) + } + } + + token = p.expectOneOf(itemValue, itemEOF) + if len(comments) > 0 { + properties.c[key] = comments + comments = []string{} + } + switch token.typ { + case itemEOF: + properties.m[key] = "" + goto done + case itemValue: + properties.m[key] = token.val + } + } + +done: + return properties, nil +} + +func (p *parser) errorf(format string, args ...interface{}) { + format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +func (p *parser) expect(expected itemType) (token item) { + token = p.lex.nextItem() + if token.typ != expected { + p.unexpected(token) + } + return token +} + +func (p *parser) expectOneOf(expected ...itemType) (token item) { + token = p.lex.nextItem() + for _, v := range expected { + if token.typ == v { + return token + } + } + p.unexpected(token) + panic("unexpected token") +} + +func (p *parser) unexpected(token item) { + p.errorf(token.String()) +} + +// recover is the handler that turns panics into returns from the top level of Parse. +func (p *parser) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + *errp = e.(error) + } + return +} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go new file mode 100644 index 000000000..cb3d1a332 --- /dev/null +++ b/vendor/github.com/magiconair/properties/properties.go @@ -0,0 +1,833 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. +// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. + +import ( + "fmt" + "io" + "log" + "os" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +const maxExpansionDepth = 64 + +// ErrorHandlerFunc defines the type of function which handles failures +// of the MustXXX() functions. An error handler function must exit +// the application after handling the error. +type ErrorHandlerFunc func(error) + +// ErrorHandler is the function which handles failures of the MustXXX() +// functions. The default is LogFatalHandler. +var ErrorHandler ErrorHandlerFunc = LogFatalHandler + +// LogHandlerFunc defines the function prototype for logging errors. +type LogHandlerFunc func(fmt string, args ...interface{}) + +// LogPrintf defines a log handler which uses log.Printf. +var LogPrintf LogHandlerFunc = log.Printf + +// LogFatalHandler handles the error by logging a fatal error and exiting. +func LogFatalHandler(err error) { + log.Fatal(err) +} + +// PanicHandler handles the error by panicking. 
+func PanicHandler(err error) { + panic(err) +} + +// ----------------------------------------------------------------------------- + +// A Properties contains the key/value pairs from the properties input. +// All values are stored in unexpanded form and are expanded at runtime +type Properties struct { + // Pre-/Postfix for property expansion. + Prefix string + Postfix string + + // DisableExpansion controls the expansion of properties on Get() + // and the check for circular references on Set(). When set to + // true Properties behaves like a simple key/value store and does + // not check for circular references on Get() or on Set(). + DisableExpansion bool + + // Stores the key/value pairs + m map[string]string + + // Stores the comments per key. + c map[string][]string + + // Stores the keys in order of appearance. + k []string +} + +// NewProperties creates a new Properties struct with the default +// configuration for "${key}" expressions. +func NewProperties() *Properties { + return &Properties{ + Prefix: "${", + Postfix: "}", + m: map[string]string{}, + c: map[string][]string{}, + k: []string{}, + } +} + +// Load reads a buffer into the given Properties struct. +func (p *Properties) Load(buf []byte, enc Encoding) error { + l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} + newProperties, err := l.LoadBytes(buf) + if err != nil { + return err + } + p.Merge(newProperties) + return nil +} + +// Get returns the expanded value for the given key if exists. +// Otherwise, ok is false. +func (p *Properties) Get(key string) (value string, ok bool) { + v, ok := p.m[key] + if p.DisableExpansion { + return v, ok + } + if !ok { + return "", false + } + + expanded, err := p.expand(key, v) + + // we guarantee that the expanded value is free of + // circular references and malformed expressions + // so we panic if we still get an error here. + if err != nil { + ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) + } + + return expanded, true +} + +// MustGet returns the expanded value for the given key if exists. +// Otherwise, it panics. +func (p *Properties) MustGet(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// ClearComments removes the comments for all keys. +func (p *Properties) ClearComments() { + p.c = map[string][]string{} +} + +// ---------------------------------------------------------------------------- + +// GetComment returns the last comment before the given key or an empty string. +func (p *Properties) GetComment(key string) string { + comments, ok := p.c[key] + if !ok || len(comments) == 0 { + return "" + } + return comments[len(comments)-1] +} + +// ---------------------------------------------------------------------------- + +// GetComments returns all comments that appeared before the given key or nil. +func (p *Properties) GetComments(key string) []string { + if comments, ok := p.c[key]; ok { + return comments + } + return nil +} + +// ---------------------------------------------------------------------------- + +// SetComment sets the comment for the key. +func (p *Properties) SetComment(key, comment string) { + p.c[key] = []string{comment} +} + +// ---------------------------------------------------------------------------- + +// SetComments sets the comments for the key. If the comments are nil then +// all comments for this key are deleted. 
+func (p *Properties) SetComments(key string, comments []string) { + if comments == nil { + delete(p.c, key) + return + } + p.c[key] = comments +} + +// ---------------------------------------------------------------------------- + +// GetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the default value is returned. +func (p *Properties) GetBool(key string, def bool) bool { + v, err := p.getBool(key) + if err != nil { + return def + } + return v +} + +// MustGetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the function panics. +func (p *Properties) MustGetBool(key string) bool { + v, err := p.getBool(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getBool(key string) (value bool, err error) { + if v, ok := p.Get(key); ok { + return boolVal(v), nil + } + return false, invalidKeyError(key) +} + +func boolVal(v string) bool { + v = strings.ToLower(v) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + +// ---------------------------------------------------------------------------- + +// GetDuration parses the expanded value as a time.Duration (in ns) if the +// key exists. If key does not exist or the value cannot be parsed the default +// value is returned. In almost all cases you want to use GetParsedDuration(). +func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { + v, err := p.getInt64(key) + if err != nil { + return def + } + return time.Duration(v) +} + +// MustGetDuration parses the expanded value as a time.Duration (in ns) if +// the key exists. If key does not exist or the value cannot be parsed the +// function panics. In almost all cases you want to use MustGetParsedDuration(). +func (p *Properties) MustGetDuration(key string) time.Duration { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return time.Duration(v) +} + +// ---------------------------------------------------------------------------- + +// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { + s, ok := p.Get(key) + if !ok { + return def + } + v, err := time.ParseDuration(s) + if err != nil { + return def + } + return v +} + +// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetParsedDuration(key string) time.Duration { + s, ok := p.Get(key) + if !ok { + ErrorHandler(invalidKeyError(key)) + } + v, err := time.ParseDuration(s) + if err != nil { + ErrorHandler(err) + } + return v +} + +// ---------------------------------------------------------------------------- + +// GetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetFloat64(key string, def float64) float64 { + v, err := p.getFloat64(key) + if err != nil { + return def + } + return v +} + +// MustGetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetFloat64(key string) float64 { + v, err := p.getFloat64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getFloat64(key string) (value float64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetInt(key string, def int) int { + v, err := p.getInt64(key) + if err != nil { + return def + } + return intRangeCheck(key, v) +} + +// MustGetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetInt(key string) int { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return intRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetInt64 parses the expanded value as an int64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetInt64(key string, def int64) int64 { + v, err := p.getInt64(key) + if err != nil { + return def + } + return v +} + +// MustGetInt64 parses the expanded value as an int64 if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetInt64(key string) int64 { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getInt64(key string) (value int64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetUint parses the expanded value as a uint if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into a uint the +// function panics with an out of range error. +func (p *Properties) GetUint(key string, def uint) uint { + v, err := p.getUint64(key) + if err != nil { + return def + } + return uintRangeCheck(key, v) +} + +// MustGetUint parses the expanded value as a uint if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into a uint the function panics with +// an out of range error. +func (p *Properties) MustGetUint(key string) uint { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return uintRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetUint64 parses the expanded value as a uint64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetUint64(key string, def uint64) uint64 { + v, err := p.getUint64(key) + if err != nil { + return def + } + return v +} + +// MustGetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetUint64(key string) uint64 { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getUint64(key string) (value uint64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetString returns the expanded value for the given key if exists or +// the default value otherwise. +func (p *Properties) GetString(key, def string) string { + if v, ok := p.Get(key); ok { + return v + } + return def +} + +// MustGetString returns the expanded value for the given key if exists or +// panics otherwise. +func (p *Properties) MustGetString(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// Filter returns a new properties object which contains all properties +// for which the key matches the pattern. +func (p *Properties) Filter(pattern string) (*Properties, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + + return p.FilterRegexp(re), nil +} + +// FilterRegexp returns a new properties object which contains all properties +// for which the key matches the regular expression. +func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { + pp := NewProperties() + for _, k := range p.k { + if re.MatchString(k) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterPrefix returns a new properties object with a subset of all keys +// with the given prefix. +func (p *Properties) FilterPrefix(prefix string) *Properties { + pp := NewProperties() + for _, k := range p.k { + if strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterStripPrefix returns a new properties object with a subset of all keys +// with the given prefix and the prefix removed from the keys. +func (p *Properties) FilterStripPrefix(prefix string) *Properties { + pp := NewProperties() + n := len(prefix) + for _, k := range p.k { + if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference + // TODO(fs): this function should probably return an error but the signature is fixed + pp.Set(k[n:], p.m[k]) + } + } + return pp +} + +// Len returns the number of keys. +func (p *Properties) Len() int { + return len(p.m) +} + +// Keys returns all keys in the same order as in the input. +func (p *Properties) Keys() []string { + keys := make([]string, len(p.k)) + copy(keys, p.k) + return keys +} + +// Set sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. 
If the value contains a +// circular reference or a malformed expression then +// an error is returned. +// An empty key is silently ignored. +func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { + if key == "" { + return "", false, nil + } + + // if expansion is disabled we allow circular references + if p.DisableExpansion { + prev, ok = p.Get(key) + p.m[key] = value + if !ok { + p.k = append(p.k, key) + } + return prev, ok, nil + } + + // to check for a circular reference we temporarily need + // to set the new value. If there is an error then revert + // to the previous state. Only if all tests are successful + // then we add the key to the p.k list. + prev, ok = p.Get(key) + p.m[key] = value + + // now check for a circular reference + _, err = p.expand(key, value) + if err != nil { + + // revert to the previous state + if ok { + p.m[key] = prev + } else { + delete(p.m, key) + } + + return "", false, err + } + + if !ok { + p.k = append(p.k, key) + } + + return prev, ok, nil +} + +// SetValue sets property key to the default string value +// as defined by fmt.Sprintf("%v"). +func (p *Properties) SetValue(key string, value interface{}) error { + _, _, err := p.Set(key, fmt.Sprintf("%v", value)) + return err +} + +// MustSet sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. An empty key is silently ignored. +func (p *Properties) MustSet(key, value string) (prev string, ok bool) { + prev, ok, err := p.Set(key, value) + if err != nil { + ErrorHandler(err) + } + return prev, ok +} + +// String returns a string of all expanded 'key = value' pairs. +func (p *Properties) String() string { + var s string + for _, key := range p.k { + value, _ := p.Get(key) + s = fmt.Sprintf("%s%s = %s\n", s, key, value) + } + return s +} + +// Write writes all unexpanded 'key = value' pairs to the given writer. +// Write returns the number of bytes written and any write error encountered. +func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { + return p.WriteComment(w, "", enc) +} + +// WriteComment writes all unexpanded 'key = value' pairs to the given writer. +// If prefix is not empty then comments are written with a blank line and the +// given prefix. The prefix should be either "# " or "! " to be compatible with +// the properties file format. Otherwise, the properties parser will not be +// able to read the file back in. It returns the number of bytes written and +// any write error encountered. +func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { + var x int + + for _, key := range p.k { + value := p.m[key] + + if prefix != "" { + if comments, ok := p.c[key]; ok { + // don't print comments if they are all empty + allEmpty := true + for _, c := range comments { + if c != "" { + allEmpty = false + break + } + } + + if !allEmpty { + // add a blank line between entries but not at the top + if len(comments) > 0 && n > 0 { + x, err = fmt.Fprintln(w) + if err != nil { + return + } + n += x + } + + for _, c := range comments { + x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc)) + if err != nil { + return + } + n += x + } + } + } + } + + x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc)) + if err != nil { + return + } + n += x + } + return +} + +// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string { + m := make(map[string]string) + for k, v := range p.m { + m[k] = v + } + return m +} + +// FilterFunc returns a copy of the properties which includes the values which passed all filters. +func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { + pp := NewProperties() +outer: + for k, v := range p.m { + for _, f := range filters { + if !f(k, v) { + continue outer + } + pp.Set(k, v) + } + } + return pp +} + +// ---------------------------------------------------------------------------- + +// Delete removes the key and its comments. +func (p *Properties) Delete(key string) { + delete(p.m, key) + delete(p.c, key) + newKeys := []string{} + for _, k := range p.k { + if k != key { + newKeys = append(newKeys, k) + } + } + p.k = newKeys +} + +// Merge merges properties, comments and keys from other *Properties into p +func (p *Properties) Merge(other *Properties) { + for k, v := range other.m { + p.m[k] = v + } + for k, v := range other.c { + p.c[k] = v + } + +outer: + for _, otherKey := range other.k { + for _, key := range p.k { + if otherKey == key { + continue outer + } + } + p.k = append(p.k, otherKey) + } +} + +// ---------------------------------------------------------------------------- + +// check expands all values and returns an error if a circular reference or +// a malformed expression was found. +func (p *Properties) check() error { + for key, value := range p.m { + if _, err := p.expand(key, value); err != nil { + return err + } + } + return nil +} + +func (p *Properties) expand(key, input string) (string, error) { + // no pre/postfix -> nothing to expand + if p.Prefix == "" && p.Postfix == "" { + return input, nil + } + + return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) +} + +// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. +// The function keeps track of the keys that were already expanded and stops if it +// detects a circular reference or a malformed expression of the form '(prefix)key'. +func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { + if len(keys) > maxExpansionDepth { + return "", fmt.Errorf("expansion too deep") + } + + for { + start := strings.Index(s, prefix) + if start == -1 { + return s, nil + } + + keyStart := start + len(prefix) + keyLen := strings.Index(s[keyStart:], postfix) + if keyLen == -1 { + return "", fmt.Errorf("malformed expression") + } + + end := keyStart + keyLen + len(postfix) - 1 + key := s[keyStart : keyStart+keyLen] + + // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) + + for _, k := range keys { + if key == k { + return "", fmt.Errorf("circular reference") + } + } + + val, ok := values[key] + if !ok { + val = os.Getenv(key) + } + new_val, err := expand(val, append(keys, key), prefix, postfix, values) + if err != nil { + return "", err + } + s = s[:start] + new_val + s[end+1:] + } + return s, nil +} + +// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. 
+func encode(s string, special string, enc Encoding) string { + switch enc { + case UTF8: + return encodeUtf8(s, special) + case ISO_8859_1: + return encodeIso(s, special) + default: + panic(fmt.Sprintf("unsupported encoding %v", enc)) + } +} + +func encodeUtf8(s string, special string) string { + v := "" + for pos := 0; pos < len(s); { + r, w := utf8.DecodeRuneInString(s[pos:]) + pos += w + v += escape(r, special) + } + return v +} + +func encodeIso(s string, special string) string { + var r rune + var w int + var v string + for pos := 0; pos < len(s); { + switch r, w = utf8.DecodeRuneInString(s[pos:]); { + case r < 1<<8: // single byte rune -> escape special chars only + v += escape(r, special) + case r < 1<<16: // two byte rune -> unicode literal + v += fmt.Sprintf("\\u%04x", r) + default: // more than two bytes per rune -> can't encode + v += "?" + } + pos += w + } + return v +} + +func escape(r rune, special string) string { + switch r { + case '\f': + return "\\f" + case '\n': + return "\\n" + case '\r': + return "\\r" + case '\t': + return "\\t" + default: + if strings.ContainsRune(special, r) { + return "\\" + string(r) + } + return string(r) + } +} + +func invalidKeyError(key string) error { + return fmt.Errorf("unknown property: %s", key) +} diff --git a/vendor/github.com/magiconair/properties/properties_test.go b/vendor/github.com/magiconair/properties/properties_test.go new file mode 100644 index 000000000..5db64c3e5 --- /dev/null +++ b/vendor/github.com/magiconair/properties/properties_test.go @@ -0,0 +1,985 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "bytes" + "flag" + "fmt" + "math" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/magiconair/properties/assert" +) + +var verbose = flag.Bool("verbose", false, "Verbose output") + +func init() { + ErrorHandler = PanicHandler +} + +// ---------------------------------------------------------------------------- + +// define test cases in the form of +// {"input", "key1", "value1", "key2", "value2", ...} +var complexTests = [][]string{ + // whitespace prefix + {" key=value", "key", "value"}, // SPACE prefix + {"\fkey=value", "key", "value"}, // FF prefix + {"\tkey=value", "key", "value"}, // TAB prefix + {" \f\tkey=value", "key", "value"}, // mix prefix + + // multiple keys + {"key1=value1\nkey2=value2\n", "key1", "value1", "key2", "value2"}, + {"key1=value1\rkey2=value2\r", "key1", "value1", "key2", "value2"}, + {"key1=value1\r\nkey2=value2\r\n", "key1", "value1", "key2", "value2"}, + + // blank lines + {"\nkey=value\n", "key", "value"}, + {"\rkey=value\r", "key", "value"}, + {"\r\nkey=value\r\n", "key", "value"}, + {"\nkey=value\n \nkey2=value2", "key", "value", "key2", "value2"}, + {"\nkey=value\n\t\nkey2=value2", "key", "value", "key2", "value2"}, + + // escaped chars in key + {"k\\ ey = value", "k ey", "value"}, + {"k\\:ey = value", "k:ey", "value"}, + {"k\\=ey = value", "k=ey", "value"}, + {"k\\fey = value", "k\fey", "value"}, + {"k\\ney = value", "k\ney", "value"}, + {"k\\rey = value", "k\rey", "value"}, + {"k\\tey = value", "k\tey", "value"}, + + // escaped chars in value + {"key = v\\ alue", "key", "v alue"}, + {"key = v\\:alue", "key", "v:alue"}, + {"key = v\\=alue", "key", "v=alue"}, + {"key = v\\falue", "key", "v\falue"}, + {"key = v\\nalue", "key", "v\nalue"}, + {"key = v\\ralue", "key", "v\ralue"}, + {"key = v\\talue", "key", "v\talue"}, + + // 
silently dropped escape character + {"k\\zey = value", "kzey", "value"}, + {"key = v\\zalue", "key", "vzalue"}, + + // unicode literals + {"key\\u2318 = value", "key⌘", "value"}, + {"k\\u2318ey = value", "k⌘ey", "value"}, + {"key = value\\u2318", "key", "value⌘"}, + {"key = valu\\u2318e", "key", "valu⌘e"}, + + // multiline values + {"key = valueA,\\\n valueB", "key", "valueA,valueB"}, // SPACE indent + {"key = valueA,\\\n\f\f\fvalueB", "key", "valueA,valueB"}, // FF indent + {"key = valueA,\\\n\t\t\tvalueB", "key", "valueA,valueB"}, // TAB indent + {"key = valueA,\\\n \f\tvalueB", "key", "valueA,valueB"}, // mix indent + + // comments + {"# this is a comment\n! and so is this\nkey1=value1\nkey#2=value#2\n\nkey!3=value!3\n# and another one\n! and the final one", "key1", "value1", "key#2", "value#2", "key!3", "value!3"}, + + // expansion tests + {"key=value\nkey2=${key}", "key", "value", "key2", "value"}, + {"key=value\nkey2=aa${key}", "key", "value", "key2", "aavalue"}, + {"key=value\nkey2=${key}bb", "key", "value", "key2", "valuebb"}, + {"key=value\nkey2=aa${key}bb", "key", "value", "key2", "aavaluebb"}, + {"key=value\nkey2=${key}\nkey3=${key2}", "key", "value", "key2", "value", "key3", "value"}, + {"key=value\nkey2=${key}${key}", "key", "value", "key2", "valuevalue"}, + {"key=value\nkey2=${key}${key}${key}${key}", "key", "value", "key2", "valuevaluevaluevalue"}, + {"key=value\nkey2=${key}${key3}\nkey3=${key}", "key", "value", "key2", "valuevalue", "key3", "value"}, + {"key=value\nkey2=${key3}${key}${key4}\nkey3=${key}\nkey4=${key}", "key", "value", "key2", "valuevaluevalue", "key3", "value", "key4", "value"}, + {"key=${USER}", "key", os.Getenv("USER")}, + {"key=${USER}\nUSER=value", "key", "value", "USER", "value"}, +} + +// ---------------------------------------------------------------------------- + +var commentTests = []struct { + input, key, value string + comments []string +}{ + {"key=value", "key", "value", nil}, + {"#\nkey=value", "key", "value", []string{""}}, + {"#comment\nkey=value", "key", "value", []string{"comment"}}, + {"# comment\nkey=value", "key", "value", []string{"comment"}}, + {"# comment\nkey=value", "key", "value", []string{"comment"}}, + {"# comment\n\nkey=value", "key", "value", []string{"comment"}}, + {"# comment1\n# comment2\nkey=value", "key", "value", []string{"comment1", "comment2"}}, + {"# comment1\n\n# comment2\n\nkey=value", "key", "value", []string{"comment1", "comment2"}}, + {"!comment\nkey=value", "key", "value", []string{"comment"}}, + {"! comment\nkey=value", "key", "value", []string{"comment"}}, + {"! comment\nkey=value", "key", "value", []string{"comment"}}, + {"! comment\n\nkey=value", "key", "value", []string{"comment"}}, + {"! comment1\n! comment2\nkey=value", "key", "value", []string{"comment1", "comment2"}}, + {"! comment1\n\n! 
comment2\n\nkey=value", "key", "value", []string{"comment1", "comment2"}}, +} + +// ---------------------------------------------------------------------------- + +var errorTests = []struct { + input, msg string +}{ + // unicode literals + {"key\\u1 = value", "invalid unicode literal"}, + {"key\\u12 = value", "invalid unicode literal"}, + {"key\\u123 = value", "invalid unicode literal"}, + {"key\\u123g = value", "invalid unicode literal"}, + {"key\\u123", "invalid unicode literal"}, + + // circular references + {"key=${key}", "circular reference"}, + {"key1=${key2}\nkey2=${key1}", "circular reference"}, + + // malformed expressions + {"key=${ke", "malformed expression"}, + {"key=valu${ke", "malformed expression"}, +} + +// ---------------------------------------------------------------------------- + +var writeTests = []struct { + input, output, encoding string +}{ + // ISO-8859-1 tests + {"key = value", "key = value\n", "ISO-8859-1"}, + {"key = value \\\n continued", "key = value continued\n", "ISO-8859-1"}, + {"key⌘ = value", "key\\u2318 = value\n", "ISO-8859-1"}, + {"ke\\ \\:y = value", "ke\\ \\:y = value\n", "ISO-8859-1"}, + + // UTF-8 tests + {"key = value", "key = value\n", "UTF-8"}, + {"key = value \\\n continued", "key = value continued\n", "UTF-8"}, + {"key⌘ = value⌘", "key⌘ = value⌘\n", "UTF-8"}, + {"ke\\ \\:y = value", "ke\\ \\:y = value\n", "UTF-8"}, +} + +// ---------------------------------------------------------------------------- + +var writeCommentTests = []struct { + input, output, encoding string +}{ + // ISO-8859-1 tests + {"key = value", "key = value\n", "ISO-8859-1"}, + {"#\nkey = value", "key = value\n", "ISO-8859-1"}, + {"#\n#\n#\nkey = value", "key = value\n", "ISO-8859-1"}, + {"# comment\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"}, + {"\n# comment\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"}, + {"# comment\n\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"}, + {"# comment1\n# comment2\nkey = value", "# comment1\n# comment2\nkey = value\n", "ISO-8859-1"}, + {"#comment1\nkey1 = value1\n#comment2\nkey2 = value2", "# comment1\nkey1 = value1\n\n# comment2\nkey2 = value2\n", "ISO-8859-1"}, + + // UTF-8 tests + {"key = value", "key = value\n", "UTF-8"}, + {"# comment⌘\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"}, + {"\n# comment⌘\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"}, + {"# comment⌘\n\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"}, + {"# comment1⌘\n# comment2⌘\nkey = value⌘", "# comment1⌘\n# comment2⌘\nkey = value⌘\n", "UTF-8"}, + {"#comment1⌘\nkey1 = value1⌘\n#comment2⌘\nkey2 = value2⌘", "# comment1⌘\nkey1 = value1⌘\n\n# comment2⌘\nkey2 = value2⌘\n", "UTF-8"}, +} + +// ---------------------------------------------------------------------------- + +var boolTests = []struct { + input, key string + def, value bool +}{ + // valid values for TRUE + {"key = 1", "key", false, true}, + {"key = on", "key", false, true}, + {"key = On", "key", false, true}, + {"key = ON", "key", false, true}, + {"key = true", "key", false, true}, + {"key = True", "key", false, true}, + {"key = TRUE", "key", false, true}, + {"key = yes", "key", false, true}, + {"key = Yes", "key", false, true}, + {"key = YES", "key", false, true}, + + // valid values for FALSE (all other) + {"key = 0", "key", true, false}, + {"key = off", "key", true, false}, + {"key = false", "key", true, false}, + {"key = no", "key", true, false}, + + // non existent key + {"key = true", "key2", false, false}, +} + +// 
---------------------------------------------------------------------------- + +var durationTests = []struct { + input, key string + def, value time.Duration +}{ + // valid values + {"key = 1", "key", 999, 1}, + {"key = 0", "key", 999, 0}, + {"key = -1", "key", 999, -1}, + {"key = 0123", "key", 999, 123}, + + // invalid values + {"key = 0xff", "key", 999, 999}, + {"key = 1.0", "key", 999, 999}, + {"key = a", "key", 999, 999}, + + // non existent key + {"key = 1", "key2", 999, 999}, +} + +// ---------------------------------------------------------------------------- + +var parsedDurationTests = []struct { + input, key string + def, value time.Duration +}{ + // valid values + {"key = -1ns", "key", 999, -1 * time.Nanosecond}, + {"key = 300ms", "key", 999, 300 * time.Millisecond}, + {"key = 5s", "key", 999, 5 * time.Second}, + {"key = 3h", "key", 999, 3 * time.Hour}, + {"key = 2h45m", "key", 999, 2*time.Hour + 45*time.Minute}, + + // invalid values + {"key = 0xff", "key", 999, 999}, + {"key = 1.0", "key", 999, 999}, + {"key = a", "key", 999, 999}, + {"key = 1", "key", 999, 999}, + {"key = 0", "key", 999, 0}, + + // non existent key + {"key = 1", "key2", 999, 999}, +} + +// ---------------------------------------------------------------------------- + +var floatTests = []struct { + input, key string + def, value float64 +}{ + // valid values + {"key = 1.0", "key", 999, 1.0}, + {"key = 0.0", "key", 999, 0.0}, + {"key = -1.0", "key", 999, -1.0}, + {"key = 1", "key", 999, 1}, + {"key = 0", "key", 999, 0}, + {"key = -1", "key", 999, -1}, + {"key = 0123", "key", 999, 123}, + + // invalid values + {"key = 0xff", "key", 999, 999}, + {"key = a", "key", 999, 999}, + + // non existent key + {"key = 1", "key2", 999, 999}, +} + +// ---------------------------------------------------------------------------- + +var int64Tests = []struct { + input, key string + def, value int64 +}{ + // valid values + {"key = 1", "key", 999, 1}, + {"key = 0", "key", 999, 0}, + {"key = -1", "key", 999, -1}, + {"key = 0123", "key", 999, 123}, + + // invalid values + {"key = 0xff", "key", 999, 999}, + {"key = 1.0", "key", 999, 999}, + {"key = a", "key", 999, 999}, + + // non existent key + {"key = 1", "key2", 999, 999}, +} + +// ---------------------------------------------------------------------------- + +var uint64Tests = []struct { + input, key string + def, value uint64 +}{ + // valid values + {"key = 1", "key", 999, 1}, + {"key = 0", "key", 999, 0}, + {"key = 0123", "key", 999, 123}, + + // invalid values + {"key = -1", "key", 999, 999}, + {"key = 0xff", "key", 999, 999}, + {"key = 1.0", "key", 999, 999}, + {"key = a", "key", 999, 999}, + + // non existent key + {"key = 1", "key2", 999, 999}, +} + +// ---------------------------------------------------------------------------- + +var stringTests = []struct { + input, key string + def, value string +}{ + // valid values + {"key = abc", "key", "def", "abc"}, + + // non existent key + {"key = abc", "key2", "def", "def"}, +} + +// ---------------------------------------------------------------------------- + +var keysTests = []struct { + input string + keys []string +}{ + {"", []string{}}, + {"key = abc", []string{"key"}}, + {"key = abc\nkey2=def", []string{"key", "key2"}}, + {"key2 = abc\nkey=def", []string{"key2", "key"}}, + {"key = abc\nkey=def", []string{"key"}}, +} + +// ---------------------------------------------------------------------------- + +var filterTests = []struct { + input string + pattern string + keys []string + err string +}{ + {"", "", []string{}, ""}, 
+ {"", "abc", []string{}, ""}, + {"key=value", "", []string{"key"}, ""}, + {"key=value", "key=", []string{}, ""}, + {"key=value\nfoo=bar", "", []string{"foo", "key"}, ""}, + {"key=value\nfoo=bar", "f", []string{"foo"}, ""}, + {"key=value\nfoo=bar", "fo", []string{"foo"}, ""}, + {"key=value\nfoo=bar", "foo", []string{"foo"}, ""}, + {"key=value\nfoo=bar", "fooo", []string{}, ""}, + {"key=value\nkey2=value2\nfoo=bar", "ey", []string{"key", "key2"}, ""}, + {"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}, ""}, + {"key=value\nkey2=value2\nfoo=bar", "^key", []string{"key", "key2"}, ""}, + {"key=value\nkey2=value2\nfoo=bar", "^(key|foo)", []string{"foo", "key", "key2"}, ""}, + {"key=value\nkey2=value2\nfoo=bar", "[ abc", nil, "error parsing regexp.*"}, +} + +// ---------------------------------------------------------------------------- + +var filterPrefixTests = []struct { + input string + prefix string + keys []string +}{ + {"", "", []string{}}, + {"", "abc", []string{}}, + {"key=value", "", []string{"key"}}, + {"key=value", "key=", []string{}}, + {"key=value\nfoo=bar", "", []string{"foo", "key"}}, + {"key=value\nfoo=bar", "f", []string{"foo"}}, + {"key=value\nfoo=bar", "fo", []string{"foo"}}, + {"key=value\nfoo=bar", "foo", []string{"foo"}}, + {"key=value\nfoo=bar", "fooo", []string{}}, + {"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}}, +} + +// ---------------------------------------------------------------------------- + +var filterStripPrefixTests = []struct { + input string + prefix string + keys []string +}{ + {"", "", []string{}}, + {"", "abc", []string{}}, + {"key=value", "", []string{"key"}}, + {"key=value", "key=", []string{}}, + {"key=value\nfoo=bar", "", []string{"foo", "key"}}, + {"key=value\nfoo=bar", "f", []string{"foo"}}, + {"key=value\nfoo=bar", "fo", []string{"foo"}}, + {"key=value\nfoo=bar", "foo", []string{"foo"}}, + {"key=value\nfoo=bar", "fooo", []string{}}, + {"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}}, +} + +// ---------------------------------------------------------------------------- + +var setTests = []struct { + input string + key, value string + prev string + ok bool + err string + keys []string +}{ + {"", "", "", "", false, "", []string{}}, + {"", "key", "value", "", false, "", []string{"key"}}, + {"key=value", "key2", "value2", "", false, "", []string{"key", "key2"}}, + {"key=value", "abc", "value3", "", false, "", []string{"key", "abc"}}, + {"key=value", "key", "value3", "value", true, "", []string{"key"}}, +} + +// ---------------------------------------------------------------------------- + +// TestBasic tests basic single key/value combinations with all possible +// whitespace, delimiter and newline permutations. +func TestBasic(t *testing.T) { + testWhitespaceAndDelimiterCombinations(t, "key", "") + testWhitespaceAndDelimiterCombinations(t, "key", "value") + testWhitespaceAndDelimiterCombinations(t, "key", "value ") +} + +func TestComplex(t *testing.T) { + for _, test := range complexTests { + testKeyValue(t, test[0], test[1:]...) 
+ } +} + +func TestErrors(t *testing.T) { + for _, test := range errorTests { + _, err := Load([]byte(test.input), ISO_8859_1) + assert.Equal(t, err != nil, true, "want error") + assert.Equal(t, strings.Contains(err.Error(), test.msg), true) + } +} + +func TestVeryDeep(t *testing.T) { + input := "key0=value\n" + prefix := "${" + postfix := "}" + i := 0 + for i = 0; i < maxExpansionDepth-1; i++ { + input += fmt.Sprintf("key%d=%skey%d%s\n", i+1, prefix, i, postfix) + } + + p, err := Load([]byte(input), ISO_8859_1) + assert.Equal(t, err, nil) + p.Prefix = prefix + p.Postfix = postfix + + assert.Equal(t, p.MustGet(fmt.Sprintf("key%d", i)), "value") + + // Nudge input over the edge + input += fmt.Sprintf("key%d=%skey%d%s\n", i+1, prefix, i, postfix) + + _, err = Load([]byte(input), ISO_8859_1) + assert.Equal(t, err != nil, true, "want error") + assert.Equal(t, strings.Contains(err.Error(), "expansion too deep"), true) +} + +func TestDisableExpansion(t *testing.T) { + input := "key=value\nkey2=${key}" + p := mustParse(t, input) + p.DisableExpansion = true + assert.Equal(t, p.MustGet("key"), "value") + assert.Equal(t, p.MustGet("key2"), "${key}") + + // with expansion disabled we can introduce circular references + p.MustSet("keyA", "${keyB}") + p.MustSet("keyB", "${keyA}") + assert.Equal(t, p.MustGet("keyA"), "${keyB}") + assert.Equal(t, p.MustGet("keyB"), "${keyA}") +} + +func TestDisableExpansionStillUpdatesKeys(t *testing.T) { + p := NewProperties() + p.MustSet("p1", "a") + assert.Equal(t, p.Keys(), []string{"p1"}) + assert.Equal(t, p.String(), "p1 = a\n") + + p.DisableExpansion = true + p.MustSet("p2", "b") + + assert.Equal(t, p.Keys(), []string{"p1", "p2"}) + assert.Equal(t, p.String(), "p1 = a\np2 = b\n") +} + +func TestMustGet(t *testing.T) { + input := "key = value\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGet("key"), "value") + assert.Panic(t, func() { p.MustGet("invalid") }, "unknown property: invalid") +} + +func TestGetBool(t *testing.T) { + for _, test := range boolTests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetBool(test.key, test.def), test.value) + } +} + +func TestMustGetBool(t *testing.T) { + input := "key = true\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetBool("key"), true) + assert.Panic(t, func() { p.MustGetBool("invalid") }, "unknown property: invalid") +} + +func TestGetDuration(t *testing.T) { + for _, test := range durationTests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetDuration(test.key, test.def), test.value) + } +} + +func TestMustGetDuration(t *testing.T) { + input := "key = 123\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetDuration("key"), time.Duration(123)) + assert.Panic(t, func() { p.MustGetDuration("key2") }, "strconv.ParseInt: parsing.*") + assert.Panic(t, func() { p.MustGetDuration("invalid") }, "unknown property: invalid") +} + +func TestGetParsedDuration(t *testing.T) { + for _, test := range parsedDurationTests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetParsedDuration(test.key, test.def), test.value) + } +} + +func TestMustGetParsedDuration(t *testing.T) { + input := "key = 123ms\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetParsedDuration("key"), 123*time.Millisecond) + assert.Panic(t, func() { p.MustGetParsedDuration("key2") }, "time: invalid duration ghi") + assert.Panic(t, func() { p.MustGetParsedDuration("invalid") }, "unknown 
property: invalid") +} + +func TestGetFloat64(t *testing.T) { + for _, test := range floatTests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetFloat64(test.key, test.def), test.value) + } +} + +func TestMustGetFloat64(t *testing.T) { + input := "key = 123\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetFloat64("key"), float64(123)) + assert.Panic(t, func() { p.MustGetFloat64("key2") }, "strconv.ParseFloat: parsing.*") + assert.Panic(t, func() { p.MustGetFloat64("invalid") }, "unknown property: invalid") +} + +func TestGetInt(t *testing.T) { + for _, test := range int64Tests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetInt(test.key, int(test.def)), int(test.value)) + } +} + +func TestMustGetInt(t *testing.T) { + input := "key = 123\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetInt("key"), int(123)) + assert.Panic(t, func() { p.MustGetInt("key2") }, "strconv.ParseInt: parsing.*") + assert.Panic(t, func() { p.MustGetInt("invalid") }, "unknown property: invalid") +} + +func TestGetInt64(t *testing.T) { + for _, test := range int64Tests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetInt64(test.key, test.def), test.value) + } +} + +func TestMustGetInt64(t *testing.T) { + input := "key = 123\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetInt64("key"), int64(123)) + assert.Panic(t, func() { p.MustGetInt64("key2") }, "strconv.ParseInt: parsing.*") + assert.Panic(t, func() { p.MustGetInt64("invalid") }, "unknown property: invalid") +} + +func TestGetUint(t *testing.T) { + for _, test := range uint64Tests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetUint(test.key, uint(test.def)), uint(test.value)) + } +} + +func TestMustGetUint(t *testing.T) { + input := "key = 123\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetUint("key"), uint(123)) + assert.Panic(t, func() { p.MustGetUint64("key2") }, "strconv.ParseUint: parsing.*") + assert.Panic(t, func() { p.MustGetUint64("invalid") }, "unknown property: invalid") +} + +func TestGetUint64(t *testing.T) { + for _, test := range uint64Tests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetUint64(test.key, test.def), test.value) + } +} + +func TestMustGetUint64(t *testing.T) { + input := "key = 123\nkey2 = ghi" + p := mustParse(t, input) + assert.Equal(t, p.MustGetUint64("key"), uint64(123)) + assert.Panic(t, func() { p.MustGetUint64("key2") }, "strconv.ParseUint: parsing.*") + assert.Panic(t, func() { p.MustGetUint64("invalid") }, "unknown property: invalid") +} + +func TestGetString(t *testing.T) { + for _, test := range stringTests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), 1) + assert.Equal(t, p.GetString(test.key, test.def), test.value) + } +} + +func TestMustGetString(t *testing.T) { + input := `key = value` + p := mustParse(t, input) + assert.Equal(t, p.MustGetString("key"), "value") + assert.Panic(t, func() { p.MustGetString("invalid") }, "unknown property: invalid") +} + +func TestComment(t *testing.T) { + for _, test := range commentTests { + p := mustParse(t, test.input) + assert.Equal(t, p.MustGetString(test.key), test.value) + assert.Equal(t, p.GetComments(test.key), test.comments) + if test.comments != nil { + assert.Equal(t, p.GetComment(test.key), test.comments[len(test.comments)-1]) + } else { + assert.Equal(t, p.GetComment(test.key), "") + } + + // 
test setting comments + if len(test.comments) > 0 { + // set single comment + p.ClearComments() + assert.Equal(t, len(p.c), 0) + p.SetComment(test.key, test.comments[0]) + assert.Equal(t, p.GetComment(test.key), test.comments[0]) + + // set multiple comments + p.ClearComments() + assert.Equal(t, len(p.c), 0) + p.SetComments(test.key, test.comments) + assert.Equal(t, p.GetComments(test.key), test.comments) + + // clear comments for a key + p.SetComments(test.key, nil) + assert.Equal(t, p.GetComment(test.key), "") + assert.Equal(t, p.GetComments(test.key), ([]string)(nil)) + } + } +} + +func TestFilter(t *testing.T) { + for _, test := range filterTests { + p := mustParse(t, test.input) + pp, err := p.Filter(test.pattern) + if err != nil { + assert.Matches(t, err.Error(), test.err) + continue + } + assert.Equal(t, pp != nil, true, "want properties") + assert.Equal(t, pp.Len(), len(test.keys)) + for _, key := range test.keys { + v1, ok1 := p.Get(key) + v2, ok2 := pp.Get(key) + assert.Equal(t, ok1, true) + assert.Equal(t, ok2, true) + assert.Equal(t, v1, v2) + } + } +} + +func TestFilterPrefix(t *testing.T) { + for _, test := range filterPrefixTests { + p := mustParse(t, test.input) + pp := p.FilterPrefix(test.prefix) + assert.Equal(t, pp != nil, true, "want properties") + assert.Equal(t, pp.Len(), len(test.keys)) + for _, key := range test.keys { + v1, ok1 := p.Get(key) + v2, ok2 := pp.Get(key) + assert.Equal(t, ok1, true) + assert.Equal(t, ok2, true) + assert.Equal(t, v1, v2) + } + } +} + +func TestFilterStripPrefix(t *testing.T) { + for _, test := range filterStripPrefixTests { + p := mustParse(t, test.input) + pp := p.FilterPrefix(test.prefix) + assert.Equal(t, pp != nil, true, "want properties") + assert.Equal(t, pp.Len(), len(test.keys)) + for _, key := range test.keys { + v1, ok1 := p.Get(key) + v2, ok2 := pp.Get(key) + assert.Equal(t, ok1, true) + assert.Equal(t, ok2, true) + assert.Equal(t, v1, v2) + } + } +} + +func TestKeys(t *testing.T) { + for _, test := range keysTests { + p := mustParse(t, test.input) + assert.Equal(t, p.Len(), len(test.keys)) + assert.Equal(t, len(p.Keys()), len(test.keys)) + assert.Equal(t, p.Keys(), test.keys) + } +} + +func TestSet(t *testing.T) { + for _, test := range setTests { + p := mustParse(t, test.input) + prev, ok, err := p.Set(test.key, test.value) + if test.err != "" { + assert.Matches(t, err.Error(), test.err) + continue + } + + assert.Equal(t, err, nil) + assert.Equal(t, ok, test.ok) + if ok { + assert.Equal(t, prev, test.prev) + } + assert.Equal(t, p.Keys(), test.keys) + } +} + +func TestSetValue(t *testing.T) { + tests := []interface{}{ + true, false, + int8(123), int16(123), int32(123), int64(123), int(123), + uint8(123), uint16(123), uint32(123), uint64(123), uint(123), + float32(1.23), float64(1.23), + "abc", + } + + for _, v := range tests { + p := NewProperties() + err := p.SetValue("x", v) + assert.Equal(t, err, nil) + assert.Equal(t, p.GetString("x", ""), fmt.Sprintf("%v", v)) + } +} + +func TestMustSet(t *testing.T) { + input := "key=${key}" + p := mustParse(t, input) + assert.Panic(t, func() { p.MustSet("key", "${key}") }, "circular reference .*") +} + +func TestWrite(t *testing.T) { + for _, test := range writeTests { + p, err := parse(test.input) + + buf := new(bytes.Buffer) + var n int + switch test.encoding { + case "UTF-8": + n, err = p.Write(buf, UTF8) + case "ISO-8859-1": + n, err = p.Write(buf, ISO_8859_1) + } + assert.Equal(t, err, nil) + s := string(buf.Bytes()) + assert.Equal(t, n, len(test.output), fmt.Sprintf("input=%q 
expected=%q obtained=%q", test.input, test.output, s)) + assert.Equal(t, s, test.output, fmt.Sprintf("input=%q expected=%q obtained=%q", test.input, test.output, s)) + } +} + +func TestWriteComment(t *testing.T) { + for _, test := range writeCommentTests { + p, err := parse(test.input) + + buf := new(bytes.Buffer) + var n int + switch test.encoding { + case "UTF-8": + n, err = p.WriteComment(buf, "# ", UTF8) + case "ISO-8859-1": + n, err = p.WriteComment(buf, "# ", ISO_8859_1) + } + assert.Equal(t, err, nil) + s := string(buf.Bytes()) + assert.Equal(t, n, len(test.output), fmt.Sprintf("input=%q expected=%q obtained=%q", test.input, test.output, s)) + assert.Equal(t, s, test.output, fmt.Sprintf("input=%q expected=%q obtained=%q", test.input, test.output, s)) + } +} + +func TestCustomExpansionExpression(t *testing.T) { + testKeyValuePrePostfix(t, "*[", "]*", "key=value\nkey2=*[key]*", "key", "value", "key2", "value") +} + +func TestPanicOn32BitIntOverflow(t *testing.T) { + is32Bit = true + var min, max int64 = math.MinInt32 - 1, math.MaxInt32 + 1 + input := fmt.Sprintf("min=%d\nmax=%d", min, max) + p := mustParse(t, input) + assert.Equal(t, p.MustGetInt64("min"), min) + assert.Equal(t, p.MustGetInt64("max"), max) + assert.Panic(t, func() { p.MustGetInt("min") }, ".* out of range") + assert.Panic(t, func() { p.MustGetInt("max") }, ".* out of range") +} + +func TestPanicOn32BitUintOverflow(t *testing.T) { + is32Bit = true + var max uint64 = math.MaxUint32 + 1 + input := fmt.Sprintf("max=%d", max) + p := mustParse(t, input) + assert.Equal(t, p.MustGetUint64("max"), max) + assert.Panic(t, func() { p.MustGetUint("max") }, ".* out of range") +} + +func TestDeleteKey(t *testing.T) { + input := "#comments should also be gone\nkey=to-be-deleted\nsecond=key" + p := mustParse(t, input) + assert.Equal(t, len(p.m), 2) + assert.Equal(t, len(p.c), 1) + assert.Equal(t, len(p.k), 2) + p.Delete("key") + assert.Equal(t, len(p.m), 1) + assert.Equal(t, len(p.c), 0) + assert.Equal(t, len(p.k), 1) + assert.Equal(t, p.k[0], "second") + assert.Equal(t, p.m["second"], "key") +} + +func TestDeleteUnknownKey(t *testing.T) { + input := "#comments should also be gone\nkey=to-be-deleted" + p := mustParse(t, input) + assert.Equal(t, len(p.m), 1) + assert.Equal(t, len(p.c), 1) + assert.Equal(t, len(p.k), 1) + p.Delete("wrong-key") + assert.Equal(t, len(p.m), 1) + assert.Equal(t, len(p.c), 1) + assert.Equal(t, len(p.k), 1) +} + +func TestMerge(t *testing.T) { + input1 := "#comment\nkey=value\nkey2=value2" + input2 := "#another comment\nkey=another value\nkey3=value3" + p1 := mustParse(t, input1) + p2 := mustParse(t, input2) + p1.Merge(p2) + assert.Equal(t, len(p1.m), 3) + assert.Equal(t, len(p1.c), 1) + assert.Equal(t, len(p1.k), 3) + assert.Equal(t, p1.MustGet("key"), "another value") + assert.Equal(t, p1.GetComment("key"), "another comment") +} + +func TestMap(t *testing.T) { + input := "key=value\nabc=def" + p := mustParse(t, input) + m := map[string]string{"key": "value", "abc": "def"} + assert.Equal(t, p.Map(), m) +} + +func TestFilterFunc(t *testing.T) { + input := "key=value\nabc=def" + p := mustParse(t, input) + pp := p.FilterFunc(func(k, v string) bool { + return k != "abc" + }) + m := map[string]string{"key": "value"} + assert.Equal(t, pp.Map(), m) +} + +func TestLoad(t *testing.T) { + x := "key=${value}\nvalue=${key}" + p := NewProperties() + p.DisableExpansion = true + err := p.Load([]byte(x), UTF8) + assert.Equal(t, err, nil) +} + +// ---------------------------------------------------------------------------- + 
+// tests all combinations of delimiters, leading and/or trailing whitespace and newlines. +func testWhitespaceAndDelimiterCombinations(t *testing.T, key, value string) { + whitespace := []string{"", " ", "\f", "\t"} + delimiters := []string{"", " ", "=", ":"} + newlines := []string{"", "\r", "\n", "\r\n"} + for _, dl := range delimiters { + for _, ws1 := range whitespace { + for _, ws2 := range whitespace { + for _, nl := range newlines { + // skip the one case where there is nothing between a key and a value + if ws1 == "" && dl == "" && ws2 == "" && value != "" { + continue + } + + input := fmt.Sprintf("%s%s%s%s%s%s", key, ws1, dl, ws2, value, nl) + testKeyValue(t, input, key, value) + } + } + } + } +} + +// tests whether key/value pairs exist for a given input. +// keyvalues is expected to be an even number of strings of "key", "value", ... +func testKeyValue(t *testing.T, input string, keyvalues ...string) { + testKeyValuePrePostfix(t, "${", "}", input, keyvalues...) +} + +// tests whether key/value pairs exist for a given input. +// keyvalues is expected to be an even number of strings of "key", "value", ... +func testKeyValuePrePostfix(t *testing.T, prefix, postfix, input string, keyvalues ...string) { + p, err := Load([]byte(input), ISO_8859_1) + assert.Equal(t, err, nil) + p.Prefix = prefix + p.Postfix = postfix + assertKeyValues(t, input, p, keyvalues...) +} + +// tests whether key/value pairs exist for a given input. +// keyvalues is expected to be an even number of strings of "key", "value", ... +func assertKeyValues(t *testing.T, input string, p *Properties, keyvalues ...string) { + assert.Equal(t, p != nil, true, "want properties") + assert.Equal(t, 2*p.Len(), len(keyvalues), "Odd number of key/value pairs.") + + for i := 0; i < len(keyvalues); i += 2 { + key, value := keyvalues[i], keyvalues[i+1] + v, ok := p.Get(key) + if !ok { + t.Errorf("No key %q found (input=%q)", key, input) + } + if got, want := v, value; !reflect.DeepEqual(got, want) { + t.Errorf("Value %q does not match %q (input=%q)", v, value, input) + } + } +} + +func mustParse(t *testing.T, s string) *Properties { + p, err := parse(s) + if err != nil { + t.Fatalf("parse failed with %s", err) + } + return p +} + +// prints to stderr if the -verbose flag was given. +func printf(format string, args ...interface{}) { + if *verbose { + fmt.Fprintf(os.Stderr, format, args...) + } +} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go new file mode 100644 index 000000000..b013a2e5e --- /dev/null +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -0,0 +1,31 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "math" +) + +// make this a var to overwrite it in a test +var is32Bit = ^uint(0) == math.MaxUint32 + +// intRangeCheck checks if the value fits into the int type and +// panics if it does not. +func intRangeCheck(key string, v int64) int { + if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return int(v) +} + +// uintRangeCheck checks if the value fits into the uint type and +// panics if it does not. 
+func uintRangeCheck(key string, v uint64) uint { + if is32Bit && v > math.MaxUint32 { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return uint(v) +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 000000000..d9deadb86 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.9.x + - tip + +script: + - go test diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..0018dc7d9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. 
We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..2a727575a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,171 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go new file mode 100644 index 000000000..a81728e74 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go @@ -0,0 +1,259 @@ +package mapstructure + +import ( + "errors" + "reflect" + "testing" + "time" +) + +func TestComposeDecodeHookFunc(t *testing.T) { + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "foo", nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "bar", nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + result, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if result.(string) != "foobar" { + t.Fatalf("bad: %#v", result) + } +} + +func TestComposeDecodeHookFunc_err(t *testing.T) { + f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + return nil, errors.New("foo") + } + + f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + panic("NOPE") + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42) + if err.Error() != "foo" { + t.Fatalf("bad: %s", err) + } +} + +func TestComposeDecodeHookFunc_kinds(t *testing.T) { + var f2From reflect.Kind + + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return int(42), nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + f2From = f + return data, nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), 
reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if f2From != reflect.Int { + t.Fatalf("bad: %#v", f2From) + } +} + +func TestStringToSliceHookFunc(t *testing.T) { + f := StringToSliceHookFunc(",") + + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {sliceType, sliceType, 42, 42, false}, + {strType, strType, 42, 42, false}, + { + strType, + sliceType, + "foo,bar,baz", + []string{"foo", "bar", "baz"}, + false, + }, + { + strType, + sliceType, + "", + []string{}, + false, + }, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestStringToTimeDurationHookFunc(t *testing.T) { + f := StringToTimeDurationHookFunc() + + strType := reflect.TypeOf("") + timeType := reflect.TypeOf(time.Duration(5)) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {strType, timeType, "5s", 5 * time.Second, false}, + {strType, timeType, "5", time.Duration(0), true}, + {strType, strType, "5", "5", false}, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestStringToTimeHookFunc(t *testing.T) { + strType := reflect.TypeOf("") + timeType := reflect.TypeOf(time.Time{}) + cases := []struct { + f, t reflect.Type + layout string + data interface{} + result interface{} + err bool + }{ + {strType, timeType, time.RFC3339, "2006-01-02T15:04:05Z", + time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), false}, + {strType, timeType, time.RFC3339, "5", time.Time{}, true}, + {strType, strType, time.RFC3339, "5", "5", false}, + } + + for i, tc := range cases { + f := StringToTimeHookFunc(tc.layout) + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestWeaklyTypedHook(t *testing.T) { + var f DecodeHookFunc = WeaklyTypedHook + + boolType := reflect.TypeOf(true) + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + // TO STRING + { + boolType, + strType, + false, + "0", + false, + }, + + { + boolType, + strType, + true, + "1", + false, + }, + + { + reflect.TypeOf(float32(1)), + strType, + float32(7), + "7", + false, + }, + + { + reflect.TypeOf(int(1)), + strType, + int(7), + "7", + false, + }, + + { + sliceType, + strType, + []uint8("foo"), + "foo", + false, + }, + + { + reflect.TypeOf(uint(1)), + strType, + uint(7), + "7", + false, + }, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} diff --git 
a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 000000000..47a99e5af
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 000000000..13cc5e3d6
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,1061 @@
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+ // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. 
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + inputVal := reflect.ValueOf(input) + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. 
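+		// DecodeHookExec accepts both the newer Type-based and the legacy
+		// Kind-based hook signatures and dispatches accordingly.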
+		var err error
+		input, err = DecodeHookExec(
+			d.config.DecodeHook,
+			inputVal.Type(), outVal.Type(), input)
+		if err != nil {
+			return fmt.Errorf("error decoding '%s': %s", name, err)
+		}
+	}
+
+	var err error
+	inputKind := getKind(outVal)
+	switch inputKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, input, outVal)
+	case reflect.Interface:
+		err = d.decodeBasic(name, input, outVal)
+	case reflect.String:
+		err = d.decodeString(name, input, outVal)
+	case reflect.Int:
+		err = d.decodeInt(name, input, outVal)
+	case reflect.Uint:
+		err = d.decodeUint(name, input, outVal)
+	case reflect.Float32:
+		err = d.decodeFloat(name, input, outVal)
+	case reflect.Struct:
+		err = d.decodeStruct(name, input, outVal)
+	case reflect.Map:
+		err = d.decodeMap(name, input, outVal)
+	case reflect.Ptr:
+		err = d.decodePtr(name, input, outVal)
+	case reflect.Slice:
+		err = d.decodeSlice(name, input, outVal)
+	case reflect.Array:
+		err = d.decodeArray(name, input, outVal)
+	case reflect.Func:
+		err = d.decodeFunc(name, input, outVal)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, inputKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		return d.decode(name, data, val.Elem())
+	}
+	dataVal := reflect.ValueOf(data)
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+		dataKind == reflect.Array && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch elemKind {
+		case reflect.Uint8:
+			var uints []uint8
+			if dataKind == reflect.Array {
+				uints = make([]uint8, dataVal.Len(), dataVal.Len())
+				for i := range uints {
+					uints[i] = dataVal.Index(i).Interface().(uint8)
+				}
+			} else {
+				uints = dataVal.Interface().([]uint8)
+			}
+			val.SetString(string(uints))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder)
decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == 
reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap 
reflect.Value) error {
+	typ := dataVal.Type()
+	for i := 0; i < typ.NumField(); i++ {
+		// Get the StructField first since this is a cheap operation. If the
+		// field is unexported, then ignore it.
+		f := typ.Field(i)
+		if f.PkgPath != "" {
+			continue
+		}
+
+		// Next get the actual value of this field and verify it is assignable
+		// to the map value.
+		v := dataVal.Field(i)
+		if !v.Type().AssignableTo(valMap.Type().Elem()) {
+			return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+		}
+
+		tagValue := f.Tag.Get(d.config.TagName)
+		tagParts := strings.Split(tagValue, ",")
+
+		// Determine the name of the key in the map
+		keyName := f.Name
+		if tagParts[0] != "" {
+			if tagParts[0] == "-" {
+				continue
+			}
+			keyName = tagParts[0]
+		}
+
+		// If "squash" is specified in the tag, we squash the field down.
+		squash := false
+		for _, tag := range tagParts[1:] {
+			if tag == "squash" {
+				squash = true
+				break
+			}
+		}
+		if squash && v.Kind() != reflect.Struct {
+			return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+		}
+
+		switch v.Kind() {
+		// this is an embedded struct, so handle it differently
+		case reflect.Struct:
+			x := reflect.New(v.Type())
+			x.Elem().Set(v)
+
+			vType := valMap.Type()
+			vKeyType := vType.Key()
+			vElemType := vType.Elem()
+			mType := reflect.MapOf(vKeyType, vElemType)
+			vMap := reflect.MakeMap(mType)
+
+			err := d.decode(keyName, x.Interface(), vMap)
+			if err != nil {
+				return err
+			}
+
+			if squash {
+				for _, k := range vMap.MapKeys() {
+					valMap.SetMapIndex(k, vMap.MapIndex(k))
+				}
+			} else {
+				valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+			}
+
+		default:
+			valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+		}
+	}
+
+	if val.CanAddr() {
+		val.Set(valMap)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	valType := val.Type()
+	valElemType := valType.Elem()
+
+	if val.CanSet() {
+		realVal := val
+		if realVal.IsNil() || d.config.ZeroFields {
+			realVal = reflect.New(valElemType)
+		}
+
+		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+			return err
+		}
+
+		val.Set(realVal)
+	} else {
+		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Functions cannot be converted from any other type, so require the
+	// source to be a function of the identical type and assign it directly.
+ dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. 
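+		// (reflect.New yields a pointer to a zeroed array; Elem
+		// dereferences it to the addressable array value.)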
+ valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, structVal.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
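+			// This fallback is what lets a lowercase map key such as
+			// "name" match the exported struct field "Name".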
+ for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go new file mode 100644 index 000000000..41d2a41f7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding/json" + "testing" +) + +func Benchmark_Decode(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +// decodeViaJSON takes the map data and passes it through encoding/json to convert it into the +// given Go native structure pointed to by v. v must be a pointer to a struct. +func decodeViaJSON(data interface{}, v interface{}) error { + // Perform the task by simply marshalling the input into JSON, + // then unmarshalling it into target native Go struct. 
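+	// (Deliberately naive: this path exists only as the comparison
+	// baseline for Benchmark_DecodeViaJSON.)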
+ b, err := json.Marshal(data) + if err != nil { + return err + } + return json.Unmarshal(b, v) +} + +func Benchmark_DecodeViaJSON(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + decodeViaJSON(input, &result) + } +} + +func Benchmark_DecodeBasic(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeTypeConversion(b *testing.B) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + var resultStrict TypeConversionResult + for i := 0; i < b.N; i++ { + Decode(input, &resultStrict) + } +} + +func Benchmark_DecodeMap(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeMapOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSlice(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + var result Slice + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSliceOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeWeaklyTypedInput(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. 
+ input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadata(b *testing.B) { + type Person struct { + Name string + Age int + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadataEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + b.Fatalf("err: %s", err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeTagged(b *testing.B) { + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go new file mode 100644 index 000000000..1319da566 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go @@ -0,0 +1,372 @@ +package mapstructure + +import ( + "reflect" + "testing" +) + +// GH-1, GH-10, GH-96 +func TestDecode_NilValue(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in interface{} + target interface{} + out interface{} + metaKeys []string + metaUnused []string + }{ + { + "all nil", + &map[string]interface{}{ + "vfoo": nil, + "vother": nil, + }, + &Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "", Vother: nil}, + []string{"Vfoo", "Vother"}, + []string{}, + }, + { + "partial nil", + &map[string]interface{}{ + "vfoo": "baz", + "vother": nil, + }, + &Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "baz", Vother: nil}, + []string{"Vfoo", "Vother"}, + []string{}, + }, + { + "partial decode", + &map[string]interface{}{ + "vother": nil, + }, + &Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "foo", Vother: nil}, + []string{"Vother"}, + []string{}, + }, + { + "unused values", + &map[string]interface{}{ + "vbar": "bar", + "vfoo": nil, + "vother": nil, + }, + &Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "", Vother: nil}, + []string{"Vfoo", "Vother"}, + []string{"vbar"}, + }, + { + "map interface all nil", + &map[interface{}]interface{}{ + "vfoo": nil, + "vother": nil, + }, + &Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "", Vother: nil}, + []string{"Vfoo", "Vother"}, + []string{}, + }, + { + "map interface partial nil", + &map[interface{}]interface{}{ + "vfoo": "baz", + "vother": nil, + }, + &Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "baz", Vother: nil}, + []string{"Vfoo", "Vother"}, + []string{}, + }, + { + "map interface partial decode", + &map[interface{}]interface{}{ + "vother": nil, + }, + 
&Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "foo", Vother: nil}, + []string{"Vother"}, + []string{}, + }, + { + "map interface unused values", + &map[interface{}]interface{}{ + "vbar": "bar", + "vfoo": nil, + "vother": nil, + }, + &Map{Vfoo: "foo", Vother: map[string]string{"foo": "bar"}}, + &Map{Vfoo: "", Vother: nil}, + []string{"Vfoo", "Vother"}, + []string{"vbar"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + config := &DecoderConfig{ + Metadata: new(Metadata), + Result: tc.target, + ZeroFields: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + err = decoder.Decode(tc.in) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if !reflect.DeepEqual(tc.out, tc.target) { + t.Fatalf("%q: TestDecode_NilValue() expected: %#v, got: %#v", tc.name, tc.out, tc.target) + } + + if !reflect.DeepEqual(tc.metaKeys, config.Metadata.Keys) { + t.Fatalf("%q: Metadata.Keys mismatch expected: %#v, got: %#v", tc.name, tc.metaKeys, config.Metadata.Keys) + } + + if !reflect.DeepEqual(tc.metaUnused, config.Metadata.Unused) { + t.Fatalf("%q: Metadata.Unused mismatch expected: %#v, got: %#v", tc.name, tc.metaUnused, config.Metadata.Unused) + } + }) + } +} + +// #48 +func TestNestedTypePointerWithDefaults(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + result := NestedPointer{ + Vbar: &Basic{ + Vuint: 42, + }, + } + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } + + // this is the error + if result.Vbar.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vbar.Vuint) + } + +} + +type NestedSlice struct { + Vfoo string + Vbars []Basic + Vempty []Basic +} + +// #48 +func TestNestedTypeSliceWithDefaults(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbars": []map[string]interface{}{ + {"vstring": "foo", "vint": 42, "vbool": true}, + {"vint": 42, "vbool": true}, + }, + "vempty": []map[string]interface{}{ + {"vstring": "foo", "vint": 42, "vbool": true}, + {"vint": 42, "vbool": true}, + }, + } + + result := NestedSlice{ + Vbars: []Basic{ + {Vuint: 42}, + {Vstring: "foo"}, + }, + } + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbars[0].Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbars[0].Vstring) + } + // this is the error + if result.Vbars[0].Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vbars[0].Vuint) + } +} + +// #48 workaround +func TestNestedTypeWithDefaults(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + 
result := Nested{ + Vbar: Basic{ + Vuint: 42, + }, + } + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } + + // this is the error + if result.Vbar.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vbar.Vuint) + } + +} + +// #67 panic() on extending slices (decodeSlice with disabled ZeroValues) +func TestDecodeSliceToEmptySliceWOZeroing(t *testing.T) { + t.Parallel() + + type TestStruct struct { + Vfoo []string + } + + decode := func(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + ZeroFields: false, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) + } + + { + input := map[string]interface{}{ + "vfoo": []string{"1"}, + } + + result := &TestStruct{} + + err := decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + } + + { + input := map[string]interface{}{ + "vfoo": []string{"1"}, + } + + result := &TestStruct{ + Vfoo: []string{}, + } + + err := decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + } + + { + input := map[string]interface{}{ + "vfoo": []string{"2", "3"}, + } + + result := &TestStruct{ + Vfoo: []string{"1"}, + } + + err := decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + } +} + +// #70 +func TestNextSquashMapstructure(t *testing.T) { + data := &struct { + Level1 struct { + Level2 struct { + Foo string + } `mapstructure:",squash"` + } `mapstructure:",squash"` + }{} + err := Decode(map[interface{}]interface{}{"foo": "baz"}, &data) + if err != nil { + t.Fatalf("should not error: %s", err) + } + if data.Level1.Level2.Foo != "baz" { + t.Fatal("value should be baz") + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go new file mode 100644 index 000000000..f17c214a8 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go @@ -0,0 +1,203 @@ +package mapstructure + +import ( + "fmt" +) + +func ExampleDecode() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. 
+ input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}} +} + +func ExampleDecode_errors() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": 123, + "age": "bad value", + "emails": []int{1, 2, 3}, + } + + var result Person + err := Decode(input, &result) + if err == nil { + panic("should have an error") + } + + fmt.Println(err.Error()) + // Output: + // 5 error(s) decoding: + // + // * 'Age' expected type 'int', got unconvertible type 'string' + // * 'Emails[0]' expected type 'string', got unconvertible type 'int' + // * 'Emails[1]' expected type 'string', got unconvertible type 'int' + // * 'Emails[2]' expected type 'string', got unconvertible type 'int' + // * 'Name' expected type 'string', got unconvertible type 'int' +} + +func ExampleDecode_metadata() { + type Person struct { + Name string + Age int + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + // For metadata, we make a more advanced DecoderConfig so we can + // more finely configure the decoder that is used. In this case, we + // just tell the decoder we want to track metadata. + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + if err := decoder.Decode(input); err != nil { + panic(err) + } + + fmt.Printf("Unused keys: %#v", md.Unused) + // Output: + // Unused keys: []string{"email"} +} + +func ExampleDecode_weaklyTypedInput() { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. + input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + err = decoder.Decode(input) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}} +} + +func ExampleDecode_tags() { + // Note that the mapstructure tags defined in the struct type + // can indicate which fields the values are mapped to. 
+ type Person struct { + Name string `mapstructure:"person_name"` + Age int `mapstructure:"person_age"` + } + + input := map[string]interface{}{ + "person_name": "Mitchell", + "person_age": 91, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91} +} + +func ExampleDecode_embeddedStruct() { + // Squashing multiple embedded structs is allowed using the squash tag. + // This is demonstrated by creating a composite struct of multiple types + // and decoding into it. In this case, a person can carry with it both + // a Family and a Location, as well as their own FirstName. + type Family struct { + LastName string + } + type Location struct { + City string + } + type Person struct { + Family `mapstructure:",squash"` + Location `mapstructure:",squash"` + FirstName string + } + + input := map[string]interface{}{ + "FirstName": "Mitchell", + "LastName": "Hashimoto", + "City": "San Francisco", + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City) + // Output: + // Mitchell Hashimoto, San Francisco +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go new file mode 100644 index 000000000..12b2772ac --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -0,0 +1,1776 @@ +package mapstructure + +import ( + "encoding/json" + "io" + "reflect" + "sort" + "strings" + "testing" +) + +type Basic struct { + Vstring string + Vint int + Vuint uint + Vbool bool + Vfloat float64 + Vextra string + vsilent bool + Vdata interface{} + VjsonInt int + VjsonFloat float64 + VjsonNumber json.Number +} + +type BasicSquash struct { + Test Basic `mapstructure:",squash"` +} + +type Embedded struct { + Basic + Vunique string +} + +type EmbeddedPointer struct { + *Basic + Vunique string +} + +type EmbeddedSquash struct { + Basic `mapstructure:",squash"` + Vunique string +} + +type SliceAlias []string + +type EmbeddedSlice struct { + SliceAlias `mapstructure:"slice_alias"` + Vunique string +} + +type ArrayAlias [2]string + +type EmbeddedArray struct { + ArrayAlias `mapstructure:"array_alias"` + Vunique string +} + +type SquashOnNonStructType struct { + InvalidSquashType int `mapstructure:",squash"` +} + +type Map struct { + Vfoo string + Vother map[string]string +} + +type MapOfStruct struct { + Value map[string]Basic +} + +type Nested struct { + Vfoo string + Vbar Basic +} + +type NestedPointer struct { + Vfoo string + Vbar *Basic +} + +type NilInterface struct { + W io.Writer +} + +type Slice struct { + Vfoo string + Vbar []string +} + +type SliceOfStruct struct { + Value []Basic +} + +type Array struct { + Vfoo string + Vbar [2]string +} + +type ArrayOfStruct struct { + Value [2]Basic +} + +type Func struct { + Foo func() string +} + +type Tagged struct { + Extra string `mapstructure:"bar,what,what"` + Value string `mapstructure:"foo"` +} + +type TypeConversionResult struct { + IntToFloat float32 + IntToUint uint + IntToBool bool + IntToString string + UintToInt int + UintToFloat float32 + UintToBool bool + UintToString string + BoolToInt int + BoolToUint uint + BoolToFloat float32 + BoolToString string + FloatToInt int + FloatToUint uint + FloatToBool bool + FloatToString string + SliceUint8ToString string + StringToSliceUint8 []byte + ArrayUint8ToString string + 
StringToInt int + StringToUint uint + StringToBool bool + StringToFloat float32 + StringToStrSlice []string + StringToIntSlice []int + StringToStrArray [1]string + StringToIntArray [1]int + SliceToMap map[string]interface{} + MapToSlice []interface{} + ArrayToMap map[string]interface{} + MapToArray [1]interface{} +} + +func TestBasicTypes(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + "vjsonInt": json.Number("1234"), + "vjsonFloat": json.Number("1234.5"), + "vjsonNumber": json.Number("1234.5"), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Errorf("got an err: %s", err.Error()) + t.FailNow() + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vint) + } + + if result.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vuint) + } + + if result.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbool) + } + + if result.Vfloat != 42.42 { + t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat) + } + + if result.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vextra) + } + + if result.vsilent != false { + t.Error("vsilent should not be set, it is unexported") + } + + if result.Vdata != 42 { + t.Error("vdata should be valid") + } + + if result.VjsonInt != 1234 { + t.Errorf("vjsonint value should be 1234: %#v", result.VjsonInt) + } + + if result.VjsonFloat != 1234.5 { + t.Errorf("vjsonfloat value should be 1234.5: %#v", result.VjsonFloat) + } + + if !reflect.DeepEqual(result.VjsonNumber, json.Number("1234.5")) { + t.Errorf("vjsonnumber value should be '1234.5': %T, %#v", result.VjsonNumber, result.VjsonNumber) + } +} + +func TestBasic_IntWithFloat(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": float64(42), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } +} + +func TestBasic_Merge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": 42, + } + + var result Basic + result.Vuint = 100 + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + expected := Basic{ + Vint: 42, + Vuint: 100, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +// Test for issue #46. 
+func TestBasic_Struct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vdata": map[string]interface{}{ + "vstring": "foo", + }, + } + + var result, inner Basic + result.Vdata = &inner + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + expected := Basic{ + Vdata: &Basic{ + Vstring: "foo", + }, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestDecode_BasicSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + } + + var result BasicSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Test.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Test.Vstring) + } +} + +func TestDecodeFrom_BasicSquash(t *testing.T) { + t.Parallel() + + var v interface{} + var ok bool + + input := BasicSquash{ + Test: Basic{ + Vstring: "foo", + }, + } + + var result map[string]interface{} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if _, ok = result["Test"]; ok { + t.Error("test should not be present in map") + } + + v, ok = result["Vstring"] + if !ok { + t.Error("vstring should be present in map") + } else if !reflect.DeepEqual(v, "foo") { + t.Errorf("vstring value should be 'foo': %#v", v) + } +} + +func TestDecode_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "innerfoo" { + t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedPointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result EmbeddedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := EmbeddedPointer{ + Basic: &Basic{ + Vstring: "innerfoo", + }, + Vunique: "bar", + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestDecode_EmbeddedSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "slice_alias": []string{"foo", "bar"}, + "vunique": "bar", + } + + var result EmbeddedSlice + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if !reflect.DeepEqual(result.SliceAlias, SliceAlias([]string{"foo", "bar"})) { + t.Errorf("slice value: %#v", result.SliceAlias) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedArray(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "array_alias": [2]string{"foo", "bar"}, + "vunique": "bar", + } + + var result EmbeddedArray + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if !reflect.DeepEqual(result.ArrayAlias, ArrayAlias([2]string{"foo", "bar"})) { + t.Errorf("array value: %#v", result.ArrayAlias) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedSquash(t *testing.T) { + 
t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var result EmbeddedSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecodeFrom_EmbeddedSquash(t *testing.T) { + t.Parallel() + + var v interface{} + var ok bool + + input := EmbeddedSquash{ + Basic: Basic{ + Vstring: "foo", + }, + Vunique: "bar", + } + + var result map[string]interface{} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if _, ok = result["Basic"]; ok { + t.Error("basic should not be present in map") + } + + v, ok = result["Vstring"] + if !ok { + t.Error("vstring should be present in map") + } else if !reflect.DeepEqual(v, "foo") { + t.Errorf("vstring value should be 'foo': %#v", v) + } + + v, ok = result["Vunique"] + if !ok { + t.Error("vunique should be present in map") + } else if !reflect.DeepEqual(v, "bar") { + t.Errorf("vunique value should be 'bar': %#v", v) + } +} + +func TestDecode_SquashOnNonStructType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "InvalidSquashType": 42, + } + + var result SquashOnNonStructType + err := Decode(input, &result) + if err == nil { + t.Fatal("unexpected success decoding invalid squash field type") + } else if !strings.Contains(err.Error(), "unsupported type for squash") { + t.Fatalf("unexpected error message for invalid squash field type: %s", err) + } +} + +func TestDecode_DecodeHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) { + if from == reflect.String && to != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_DecodeHookType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) { + if from.Kind() == reflect.String && + to.Kind() != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_Nil(t *testing.T) { + t.Parallel() + + var input interface{} + result := Basic{ + Vstring: "foo", + } + + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result.Vstring != "foo" { + t.Fatalf("bad: %#v", result.Vstring) + } +} + +func TestDecode_NilInterfaceHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "w": "", + } + + decodeHook := func(f, t reflect.Type, v interface{}) (interface{}, error) { + if t.String() == "io.Writer" { + 
return nil, nil + } + + return v, nil + } + + var result NilInterface + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.W != nil { + t.Errorf("W should be nil: %#v", result.W) + } +} + +func TestDecode_FuncHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "baz", + } + + decodeHook := func(f, t reflect.Type, v interface{}) (interface{}, error) { + if t.Kind() != reflect.Func { + return v, nil + } + val := v.(string) + return func() string { return val }, nil + } + + var result Func + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Foo() != "baz" { + t.Errorf("Foo call result should be 'baz': %s", result.Foo()) + } +} + +func TestDecode_NonStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + + var result map[string]string + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["foo"] != "bar" { + t.Fatal("foo is not bar") + } +} + +func TestDecode_StructMatch(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vbar": Basic{ + Vstring: "foo", + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("bad: %#v", result) + } +} + +func TestDecode_TypeConversion(t *testing.T) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "SliceUint8ToString": []uint8("foo"), + "StringToSliceUint8": "foo", + "ArrayUint8ToString": [3]uint8{'f', 'o', 'o'}, + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "StringToStrSlice": "A", + "StringToIntSlice": "42", + "StringToStrArray": "A", + "StringToIntArray": "42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + "ArrayToMap": []interface{}{}, + "MapToArray": map[string]interface{}{}, + } + + expectedResultStrict := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + UintToInt: 42, + UintToFloat: 42, + BoolToInt: 0, + BoolToUint: 0, + BoolToFloat: 0, + FloatToInt: 42, + FloatToUint: 42, + } + + expectedResultWeak := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + IntToBool: true, + IntToString: "42", + UintToInt: 42, + UintToFloat: 42, + UintToBool: true, + UintToString: "42", + BoolToInt: 1, + BoolToUint: 1, + BoolToFloat: 1, + BoolToString: "1", + FloatToInt: 42, + FloatToUint: 42, + FloatToBool: true, + FloatToString: "42.42", + SliceUint8ToString: "foo", + StringToSliceUint8: []byte("foo"), + ArrayUint8ToString: "foo", + StringToInt: 42, + StringToUint: 42, + StringToBool: true, + StringToFloat: 42.42, + StringToStrSlice: []string{"A"}, + StringToIntSlice: []int{42}, + StringToStrArray: [1]string{"A"}, + StringToIntArray: [1]int{42}, + 
SliceToMap: map[string]interface{}{}, + MapToSlice: []interface{}{}, + ArrayToMap: map[string]interface{}{}, + MapToArray: [1]interface{}{}, + } + + // Test strict type conversion + var resultStrict TypeConversionResult + err := Decode(input, &resultStrict) + if err == nil { + t.Errorf("should return an error") + } + if !reflect.DeepEqual(resultStrict, expectedResultStrict) { + t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict) + } + + // Test weak type conversion + var decoder *Decoder + var resultWeak TypeConversionResult + + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &resultWeak, + } + + decoder, err = NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if !reflect.DeepEqual(resultWeak, expectedResultWeak) { + t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak) + } +} + +func TestDecoder_ErrorUnused(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "hello", + "foo": "bar", + } + + var result Basic + config := &DecoderConfig{ + ErrorUnused: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err == nil { + t.Fatal("expected error") + } +} + +func TestMap(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vother == nil { + t.Fatal("vother should not be nil") + } + + if len(result.Vother) != 2 { + t.Error("vother should have two items") + } + + if result.Vother["foo"] != "foo" { + t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"]) + } + + if result.Vother["bar"] != "bar" { + t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"]) + } +} + +func TestMapMerge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + result.Vother = map[string]string{"hello": "world"} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + expected := map[string]string{ + "foo": "foo", + "bar": "bar", + "hello": "world", + } + if !reflect.DeepEqual(result.Vother, expected) { + t.Errorf("bad: %#v", result.Vother) + } +} + +func TestMapOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Value == nil { + t.Fatal("value should not be nil") + } + + if len(result.Value) != 2 { + t.Error("value should have two items") + } + + if result.Value["foo"].Vstring != "one" { + t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring) + } + + if result.Value["bar"].Vstring != "two" { + t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring) + } +} + +func TestNestedType(t *testing.T) { + t.Parallel() + 
+ input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestNestedTypePointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result NestedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +// Test for issue #46. +func TestNestedTypeInterface(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + + "vdata": map[string]interface{}{ + "vstring": "bar", + }, + }, + } + + var result NestedPointer + result.Vbar = new(Basic) + result.Vbar.Vdata = new(Basic) + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } + + if result.Vbar.Vdata.(*Basic).Vstring != "bar" { + t.Errorf("vstring value should be 'bar': %#v", result.Vbar.Vdata.(*Basic).Vstring) + } +} + +func TestSlice(t *testing.T) { + t.Parallel() + + inputStringSlice := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + inputStringSlicePointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[]string{"foo", "bar", "baz"}, + } + + outputStringSlice := &Slice{ + "foo", + []string{"foo", "bar", "baz"}, + } + + testSliceInput(t, inputStringSlice, outputStringSlice) + testSliceInput(t, inputStringSlicePointer, outputStringSlice) +} + +func TestInvalidSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Slice{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestSliceOfStruct(t *testing.T) { + t.Parallel() + + 
input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if len(result.Value) != 2 { + t.Fatalf("expected two values, got %d", len(result.Value)) + } + + if result.Value[0].Vstring != "one" { + t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) + } + + if result.Value[1].Vstring != "two" { + t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) + } +} + +func TestSliceToMap(t *testing.T) { + t.Parallel() + + input := []map[string]interface{}{ + { + "foo": "bar", + }, + { + "bar": "baz", + }, + } + + var result map[string]interface{} + err := WeakDecode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("bad: %#v", result) + } +} + +func TestArray(t *testing.T) { + t.Parallel() + + inputStringArray := map[string]interface{}{ + "vfoo": "foo", + "vbar": [2]string{"foo", "bar"}, + } + + inputStringArrayPointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[2]string{"foo", "bar"}, + } + + outputStringArray := &Array{ + "foo", + [2]string{"foo", "bar"}, + } + + testArrayInput(t, inputStringArray, outputStringArray) + testArrayInput(t, inputStringArrayPointer, outputStringArray) +} + +func TestInvalidArray(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Array{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestArrayOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result ArrayOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if len(result.Value) != 2 { + t.Fatalf("expected two values, got %d", len(result.Value)) + } + + if result.Value[0].Vstring != "one" { + t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) + } + + if result.Value[1].Vstring != "two" { + t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) + } +} + +func TestArrayToMap(t *testing.T) { + t.Parallel() + + input := []map[string]interface{}{ + { + "foo": "bar", + }, + { + "bar": "baz", + }, + } + + var result map[string]interface{} + err := WeakDecode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("bad: %#v", result) + } +} + +func TestMapOutputForStructuredInputs(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in interface{} + target interface{} + out interface{} + wantErr bool + }{ + { + "basic struct input", + &Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vstring": "vstring", + "Vint": 2, + "Vuint": uint(3), + "Vbool": true, + "Vfloat": 4.56, + "Vextra": "vextra", + "Vdata": []byte("data"), + "VjsonInt": 0, + "VjsonFloat": 0.0, + "VjsonNumber": json.Number(""), + }, + false, + }, + { + "embedded struct input", + &Embedded{ + Vunique: "vunique", + 
Basic: Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vunique": "vunique", + "Basic": map[string]interface{}{ + "Vstring": "vstring", + "Vint": 2, + "Vuint": uint(3), + "Vbool": true, + "Vfloat": 4.56, + "Vextra": "vextra", + "Vdata": []byte("data"), + "VjsonInt": 0, + "VjsonFloat": 0.0, + "VjsonNumber": json.Number(""), + }, + }, + false, + }, + { + "slice input - should error", + []string{"foo", "bar"}, + &map[string]interface{}{}, + &map[string]interface{}{}, + true, + }, + { + "struct with slice property", + &Slice{ + Vfoo: "vfoo", + Vbar: []string{"foo", "bar"}, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vfoo": "vfoo", + "Vbar": []string{"foo", "bar"}, + }, + false, + }, + { + "struct with slice of struct property", + &SliceOfStruct{ + Value: []Basic{ + Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + }, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Value": []Basic{ + Basic{ + Vstring: "vstring", + Vint: 2, + Vuint: 3, + Vbool: true, + Vfloat: 4.56, + Vextra: "vextra", + vsilent: true, + Vdata: []byte("data"), + }, + }, + }, + false, + }, + { + "struct with map property", + &Map{ + Vfoo: "vfoo", + Vother: map[string]string{"vother": "vother"}, + }, + &map[string]interface{}{}, + &map[string]interface{}{ + "Vfoo": "vfoo", + "Vother": map[string]string{ + "vother": "vother", + }}, + false, + }, + { + "tagged struct", + &Tagged{ + Extra: "extra", + Value: "value", + }, + &map[string]string{}, + &map[string]string{ + "bar": "extra", + "foo": "value", + }, + false, + }, + { + "omit tag struct", + &struct { + Value string `mapstructure:"value"` + Omit string `mapstructure:"-"` + }{ + Value: "value", + Omit: "omit", + }, + &map[string]string{}, + &map[string]string{ + "value": "value", + }, + false, + }, + { + "decode to wrong map type", + &struct { + Value string + }{ + Value: "string", + }, + &map[string]int{}, + &map[string]int{}, + true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := Decode(tt.in, tt.target); (err != nil) != tt.wantErr { + t.Fatalf("%q: TestMapOutputForStructuredInputs() unexpected error: %s", tt.name, err) + } + + if !reflect.DeepEqual(tt.out, tt.target) { + t.Fatalf("%q: TestMapOutputForStructuredInputs() expected: %#v, got: %#v", tt.name, tt.out, tt.target) + } + }) + } +} + +func TestInvalidType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": 42, + } + + var result Basic + err := Decode(input, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok := err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegIntUint := map[string]interface{}{ + "vuint": -42, + } + + err = Decode(inputNegIntUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegFloatUint := map[string]interface{}{ + "vuint": -42.0, + } + + err = Decode(inputNegFloatUint, 
&result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestDecodeMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "Vuint": 42, + "foo": "bar", + }, + "bar": "nil", + } + + var md Metadata + var result Nested + + err := DecodeMetadata(input, &result, &md) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"Vbar.foo", "bar"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "Vuint": 42, + "foo": "bar", + }, + "bar": "nil", + } + + var md Metadata + var result Nested + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"Vbar.foo", "bar"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestMetadata_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vstring", "Vunique"} + + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestNonPtrValue(t *testing.T) { + t.Parallel() + + err := Decode(map[string]interface{}{}, Basic{}) + if err == nil { + t.Fatal("error should exist") + } + + if err.Error() != "result must be a pointer" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestTagged(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + err := Decode(input, &result) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if result.Value != "bar" { + t.Errorf("value should be 'bar', got: %#v", result.Value) + } + + if result.Extra != "value" { + t.Errorf("extra should be 'value', got: %#v", result.Extra) + } +} + +func TestWeakDecode(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "4", + "bar": "value", + } + + var result struct { + Foo int + Bar string + } + + if err := WeakDecode(input, &result); err != nil { + t.Fatalf("err: %s", err) + } + if 
result.Foo != 4 { + t.Fatalf("bad: %#v", result) + } + if result.Bar != "value" { + t.Fatalf("bad: %#v", result) + } +} + +func TestWeakDecodeMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "4", + "bar": "value", + "unused": "value", + } + + var md Metadata + var result struct { + Foo int + Bar string + } + + if err := WeakDecodeMetadata(input, &result, &md); err != nil { + t.Fatalf("err: %s", err) + } + if result.Foo != 4 { + t.Fatalf("bad: %#v", result) + } + if result.Bar != "value" { + t.Fatalf("bad: %#v", result) + } + + expectedKeys := []string{"Bar", "Foo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"unused"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) { + var result Slice + err := Decode(input, &result) + if err != nil { + t.Fatalf("got error: %s", err) + } + + if result.Vfoo != expected.Vfoo { + t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) + } + + if result.Vbar == nil { + t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) + } + + if len(result.Vbar) != len(expected.Vbar) { + t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) + } + + for i, v := range result.Vbar { + if v != expected.Vbar[i] { + t.Errorf( + "Vbar[%d] should be '%#v', got '%#v'", + i, expected.Vbar[i], v) + } + } +} + +func testArrayInput(t *testing.T, input map[string]interface{}, expected *Array) { + var result Array + err := Decode(input, &result) + if err != nil { + t.Fatalf("got error: %s", err) + } + + if result.Vfoo != expected.Vfoo { + t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) + } + + if result.Vbar == [2]string{} { + t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) + } + + if len(result.Vbar) != len(expected.Vbar) { + t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) + } + + for i, v := range result.Vbar { + if v != expected.Vbar[i] { + t.Errorf( + "Vbar[%d] should be '%#v', got '%#v'", + i, expected.Vbar[i], v) + } + } +} diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore new file mode 100644 index 000000000..720c13cba --- /dev/null +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +*.test +. +.idea +gomega.iml diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml new file mode 100644 index 000000000..8b982a9ab --- /dev/null +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + +install: + - go get -v ./... 
+  - go get github.com/onsi/ginkgo
+  - go install github.com/onsi/ginkgo/ginkgo
+
+script: $HOME/gopath/bin/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race && go vet
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
new file mode 100644
index 000000000..2ee5d9b74
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -0,0 +1,99 @@
+## HEAD
+
+### Features
+- Make string pretty diff user configurable (#273) [eb112ce, 649b44d]
+
+### Fixes
+- Use httputil.DumpRequest to pretty-print unhandled requests (#278) [a4ff0fc, b7d1a52]
+- fix typo floa32 > float32 (#272) [041ae3b, 6e33911]
+- Fix link to documentation on adding your own matchers (#270) [bb2c830, fcebc62]
+- Use setters and getters to avoid race condition (#262) [13057c3, a9c79f1]
+- Avoid sending a signal if the process is not alive (#259) [b8043e5, 4fc1762]
+- Improve message from AssignableToTypeOf when expected value is nil (#281) [9c1fb20]
+
+## 1.3.0
+
+Improvements:
+
+- The `Equal` matcher matches byte slices more performantly.
+- Improved how `MatchError` matches error strings.
+- `MatchXML` ignores the order of xml node attributes.
+- Improve support for XUnit style golang tests. ([#254](https://github.com/onsi/gomega/issues/254))
+
+Bug Fixes:
+
+- Diff generation now handles multi-byte sequences correctly.
+- Multiple goroutines can now call `gexec.Build` concurrently.
+
+## 1.2.0
+
+Improvements:
+
+- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout (a usage sketch follows the 1.0 entry below).
+- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
+- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
+- Added `HavePrefix` and `HaveSuffix` matchers.
+- `ghttp` can now handle concurrent requests.
+- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
+- Improved `ghttp`'s behavior around failing assertions and panics:
+  - If a registered handler makes a failing assertion, `ghttp` will return `500`.
+  - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. Such code is almost certainly incorrect and creating a false positive.
+- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
+- Added `WithTransform` matcher to allow munging input data before feeding it into the relevant matcher.
+- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers.
+- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that time out if the underlying Closer/Reader/Writer does not return within the allotted time.
+- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
+
+Bug Fixes:
+- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
+- `ContainElement` no longer bails if a passed-in matcher errors.
+
+## 1.0 (8/2/2014)
+
+No changes. Dropping "beta" from the version number.
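+
+A minimal sketch of the `BeSent` + `Eventually` pairing described under 1.2.0 above. The
+package name, test name, and payload are invented for illustration; only `RegisterTestingT`,
+`Eventually`, and `BeSent` come from Gomega itself.
+
+```go
+package example_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/gomega"
+)
+
+func TestBeSentWithTimeout(t *testing.T) {
+	RegisterTestingT(t)
+
+	c := make(chan string) // unbuffered: a bare `c <- "hello"` could block forever
+	go func() { <-c }()    // a consumer that will eventually be ready to receive
+
+	// Eventually retries the non-blocking send until it succeeds or times out,
+	// so the test fails cleanly instead of deadlocking on the send.
+	Eventually(c).Should(BeSent("hello"))
+}
+```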
+
+## 1.0.0-beta (7/8/2014)
+Breaking Changes:
+
+- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
+- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher.
+
+New Test-Support Features:
+
+- `ghttp`: supports testing http clients
+  - Provides a flexible fake http server
+  - Provides a collection of chainable http handlers that perform assertions.
+- `gbytes`: supports making ordered assertions against streams of data
+  - Provides a `gbytes.Buffer`
+  - Provides a `Say` matcher to perform ordered assertions against output data
+- `gexec`: supports testing external processes
+  - Provides support for building Go binaries
+  - Wraps and starts `exec.Cmd` commands
+  - Makes it easy to assert against stdout and stderr
+  - Makes it easy to send signals and wait for processes to exit
+  - Provides an `Exit` matcher to assert against exit code.
+
+DSL Changes:
+
+- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
+- The default timeouts for `Eventually` and `Consistently` are now configurable.
+
+New Matchers:
+
+- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map (sketched after CONTRIBUTING.md below).
+- `BeTemporally`: like `BeNumerically` but for `time.Time`.
+- `HaveKeyWithValue`: asserts a map has a given key with the given value.
+
+Updated Matchers:
+
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
+- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Gomega's internals to `internal`
diff --git a/vendor/github.com/onsi/gomega/CONTRIBUTING.md b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
new file mode 100644
index 000000000..0d7a09928
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing to Gomega
+
+Your contributions to Gomega are essential for its long-term maintenance and improvement. To make a contribution:
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+  - Make sure to add appropriate unit tests
+  - Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
+  - Please run the following linter locally (`go vet ./...`) and make sure its output does not contain any warnings
+- Update the documentation. In addition to standard `godoc` comments Gomega has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
+
+If you're a committer, check out RELEASING.md to learn how to cut a release.
+
+Thanks for supporting Gomega!
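+
+The matchers listed under "New Matchers" in the changelog above are easiest to grasp in
+code. A minimal, hedged sketch follows - the test name and data are invented; `ConsistOf`,
+`HaveKeyWithValue`, and `BeTemporally` are the Gomega matchers being illustrated:
+
+```go
+package example_test
+
+import (
+	"testing"
+	"time"
+
+	. "github.com/onsi/gomega"
+)
+
+func TestNewMatchersSketch(t *testing.T) {
+	RegisterTestingT(t)
+
+	// ConsistOf: order-independent assertion against the elements of a slice.
+	Expect([]string{"b", "a", "c"}).To(ConsistOf("a", "b", "c"))
+
+	// HaveKeyWithValue: asserts a map has a given key with the given value.
+	Expect(map[string]int{"answer": 42}).To(HaveKeyWithValue("answer", 42))
+
+	// BeTemporally: like BeNumerically, but for time.Time values.
+	now := time.Now()
+	Expect(now.Add(time.Second)).To(BeTemporally(">", now))
+}
+```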
diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE new file mode 100644 index 000000000..9415ee72c --- /dev/null +++ b/vendor/github.com/onsi/gomega/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md new file mode 100644 index 000000000..159be3590 --- /dev/null +++ b/vendor/github.com/onsi/gomega/README.md @@ -0,0 +1,21 @@ +![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) + +[![Build Status](https://travis-ci.org/onsi/gomega.svg)](https://travis-ci.org/onsi/gomega) + +Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers). + +If you have a question, comment, bug report, feature request, etc. please open a GitHub issue. + +## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang + +Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/) + +## Community Matchers + +A collection of community matchers is available on the [wiki](https://github.com/onsi/gomega/wiki). + +## License + +Gomega is MIT-Licensed + +The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license. diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md new file mode 100644 index 000000000..998d64ee7 --- /dev/null +++ b/vendor/github.com/onsi/gomega/RELEASING.md @@ -0,0 +1,12 @@ +A Gomega release is a tagged sha and a GitHub release. To cut a release: + +1. Ensure CHANGELOG.md is up to date. + - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Categorize the changes into + - Breaking Changes (requires a major version) + - New Features (minor version) + - Fixes (fix version) + - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) +2. Update GOMEGA_VERSION in `gomega_dsl.go` +3. Push a commit with the version number as the commit message (e.g. `v1.3.0`) +4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. 
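+
+The next file vendored below is gomega's `format` package. As orientation for reading its
+source, here is a minimal usage sketch of its two central helpers, `Message` and `Object`;
+the sample values are invented, and the exact output layout is whatever the package's doc
+comments below specify:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+func main() {
+	// Object pretty-prints a value, with type information, at an indentation level.
+	fmt.Println(format.Object([]int{1, 2, 3}, 1))
+
+	// Message assembles the standard matcher failure text from an actual value,
+	// a message fragment, and an optional expected value.
+	fmt.Println(format.Message("actual", "to equal", "expected"))
+}
+```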
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
new file mode 100644
index 000000000..6559525f1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -0,0 +1,382 @@
+/*
+Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
+*/
+package format
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
+var MaxDepth = uint(10)
+
+/*
+By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
+
+Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
+
+Note that GoString and String don't always have all the information you need to understand why a test failed!
+*/
+var UseStringerRepresentation = false
+
+/*
+Print the content of context objects. By default it will be suppressed.
+
+Set PrintContextObjects = true to enable printing of the context internals.
+*/
+var PrintContextObjects = false
+
+// TruncatedDiff chooses whether we should display a truncated pretty diff or not
+var TruncatedDiff = true
+
+// Ctx interface defined here to keep backwards compatibility with go < 1.7
+// It matches the context.Context interface
+type Ctx interface {
+	Deadline() (deadline time.Time, ok bool)
+	Done() <-chan struct{}
+	Err() error
+	Value(key interface{}) interface{}
+}
+
+var contextType = reflect.TypeOf((*Ctx)(nil)).Elem()
+var timeType = reflect.TypeOf(time.Time{})
+
+// The default indentation string emitted by the format package
+var Indent = "    "
+
+var longFormThreshold = 20
+
+/*
+Generates a formatted matcher success/failure message of the form:
+
+	Expected
+		<pretty printed actual>
+	<message>
+		<pretty printed expected>
+
+If expected is omitted, then the message looks like:
+
+	Expected
+		<pretty printed actual>
+	<message>
+*/
+func Message(actual interface{}, message string, expected ...interface{}) string {
+	if len(expected) == 0 {
+		return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
+	}
+	return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
+}
+
+/*
+
+Generates a nicely formatted matcher success / failure message.
+
+Much like Message(...), but it attempts to pretty print diffs in strings:
+
+Expected
+    <string>: "...aaaaabaaaaa..."
+to equal               |
+    <string>: "...aaaaazaaaaa..."
+ +*/ + +func MessageWithDiff(actual, message, expected string) string { + if TruncatedDiff && len(actual) >= truncateThreshold && len(expected) >= truncateThreshold { + diffPoint := findFirstMismatch(actual, expected) + formattedActual := truncateAndFormat(actual, diffPoint) + formattedExpected := truncateAndFormat(expected, diffPoint) + + spacesBeforeFormattedMismatch := findFirstMismatch(formattedActual, formattedExpected) + + tabLength := 4 + spaceFromMessageToActual := tabLength + len(": ") - len(message) + padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" + return Message(formattedActual, message+padding, formattedExpected) + } + return Message(actual, message, expected) +} + +func truncateAndFormat(str string, index int) string { + leftPadding := `...` + rightPadding := `...` + + start := index - charactersAroundMismatchToInclude + if start < 0 { + start = 0 + leftPadding = "" + } + + // slice index must include the mis-matched character + lengthOfMismatchedCharacter := 1 + end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter + if end > len(str) { + end = len(str) + rightPadding = "" + + } + return fmt.Sprintf("\"%s\"", leftPadding+str[start:end]+rightPadding) +} + +func findFirstMismatch(a, b string) int { + aSlice := strings.Split(a, "") + bSlice := strings.Split(b, "") + + for index, str := range aSlice { + if index > len(bSlice)-1 { + return index + } + if str != bSlice[index] { + return index + } + } + + if len(b) > len(a) { + return len(a) + 1 + } + + return 0 +} + +const ( + truncateThreshold = 50 + charactersAroundMismatchToInclude = 5 +) + +/* +Pretty prints the passed in object at the passed in indentation level. + +Object recurses into deeply nested objects emitting pretty-printed representations of their components. + +Modify format.MaxDepth to control how deep the recursion is allowed to go +Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of +recursing into the object. + +Set PrintContextObjects to true to print the content of objects implementing context.Context +*/ +func Object(object interface{}, indentation uint) string { + indent := strings.Repeat(Indent, int(indentation)) + value := reflect.ValueOf(object) + return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) +} + +/* +IndentString takes a string and indents each line by the specified amount. +*/ +func IndentString(s string, indentation uint) string { + components := strings.Split(s, "\n") + result := "" + indent := strings.Repeat(Indent, int(indentation)) + for i, component := range components { + result += indent + component + if i < len(components)-1 { + result += "\n" + } + } + + return result +} + +func formatType(object interface{}) string { + t := reflect.TypeOf(object) + if t == nil { + return "nil" + } + switch t.Kind() { + case reflect.Chan: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Ptr: + return fmt.Sprintf("%T | %p", object, object) + case reflect.Slice: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Map: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d", object, v.Len()) + default: + return fmt.Sprintf("%T", object) + } +} + +func formatValue(value reflect.Value, indentation uint) string { + if indentation > MaxDepth { + return "..." 
+	}
+
+	if isNilValue(value) {
+		return "nil"
+	}
+
+	if UseStringerRepresentation {
+		if value.CanInterface() {
+			obj := value.Interface()
+			switch x := obj.(type) {
+			case fmt.GoStringer:
+				return x.GoString()
+			case fmt.Stringer:
+				return x.String()
+			}
+		}
+	}
+
+	if !PrintContextObjects {
+		if value.Type().Implements(contextType) && indentation > 1 {
+			return "<suppressed context>"
+		}
+	}
+
+	switch value.Kind() {
+	case reflect.Bool:
+		return fmt.Sprintf("%v", value.Bool())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return fmt.Sprintf("%v", value.Int())
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return fmt.Sprintf("%v", value.Uint())
+	case reflect.Uintptr:
+		return fmt.Sprintf("0x%x", value.Uint())
+	case reflect.Float32, reflect.Float64:
+		return fmt.Sprintf("%v", value.Float())
+	case reflect.Complex64, reflect.Complex128:
+		return fmt.Sprintf("%v", value.Complex())
+	case reflect.Chan:
+		return fmt.Sprintf("0x%x", value.Pointer())
+	case reflect.Func:
+		return fmt.Sprintf("0x%x", value.Pointer())
+	case reflect.Ptr:
+		return formatValue(value.Elem(), indentation)
+	case reflect.Slice:
+		return formatSlice(value, indentation)
+	case reflect.String:
+		return formatString(value.String(), indentation)
+	case reflect.Array:
+		return formatSlice(value, indentation)
+	case reflect.Map:
+		return formatMap(value, indentation)
+	case reflect.Struct:
+		if value.Type() == timeType && value.CanInterface() {
+			t, _ := value.Interface().(time.Time)
+			return t.Format(time.RFC3339Nano)
+		}
+		return formatStruct(value, indentation)
+	case reflect.Interface:
+		return formatValue(value.Elem(), indentation)
+	default:
+		if value.CanInterface() {
+			return fmt.Sprintf("%#v", value.Interface())
+		}
+		return fmt.Sprintf("%#v", value)
+	}
+}
+
+func formatString(object interface{}, indentation uint) string {
+	if indentation == 1 {
+		s := fmt.Sprintf("%s", object)
+		components := strings.Split(s, "\n")
+		result := ""
+		for i, component := range components {
+			if i == 0 {
+				result += component
+			} else {
+				result += Indent + component
+			}
+			if i < len(components)-1 {
+				result += "\n"
+			}
+		}
+
+		return fmt.Sprintf("%s", result)
+	} else {
+		return fmt.Sprintf("%q", object)
+	}
+}
+
+func formatSlice(v reflect.Value, indentation uint) string {
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) {
+		return formatString(v.Bytes(), indentation)
+	}
+
+	l := v.Len()
+	result := make([]string, l)
+	longest := 0
+	for i := 0; i < l; i++ {
+		result[i] = formatValue(v.Index(i), indentation+1)
+		if len(result[i]) > longest {
+			longest = len(result[i])
+		}
+	}
+
+	if longest > longFormThreshold {
+		indenter := strings.Repeat(Indent, int(indentation))
+		return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+	}
+	return fmt.Sprintf("[%s]", strings.Join(result, ", "))
+}
+
+func formatMap(v reflect.Value, indentation uint) string {
+	l := v.Len()
+	result := make([]string, l)
+
+	longest := 0
+	for i, key := range v.MapKeys() {
+		value := v.MapIndex(key)
+		result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1))
+		if len(result[i]) > longest {
+			longest = len(result[i])
+		}
+	}
+
+	if longest > longFormThreshold {
+		indenter := strings.Repeat(Indent, int(indentation))
+		return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+	}
+	return
fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func formatStruct(v reflect.Value, indentation uint) string { + t := v.Type() + + l := v.NumField() + result := []string{} + longest := 0 + for i := 0; i < l; i++ { + structField := t.Field(i) + fieldEntry := v.Field(i) + representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) + result = append(result, representation) + if len(representation) > longest { + longest = len(representation) + } + } + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func isNilValue(a reflect.Value) bool { + switch a.Kind() { + case reflect.Invalid: + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return a.IsNil() + } + + return false +} + +/* +Returns true when the string is entirely made of printable runes, false otherwise. +*/ +func isPrintableString(str string) bool { + for _, runeValue := range str { + if !strconv.IsPrint(runeValue) { + return false + } + } + return true +} diff --git a/vendor/github.com/onsi/gomega/format/format_suite_test.go b/vendor/github.com/onsi/gomega/format/format_suite_test.go new file mode 100644 index 000000000..8e65a9529 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format_suite_test.go @@ -0,0 +1,13 @@ +package format_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestFormat(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Format Suite") +} diff --git a/vendor/github.com/onsi/gomega/format/format_test.go b/vendor/github.com/onsi/gomega/format/format_test.go new file mode 100644 index 000000000..9ea781379 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format_test.go @@ -0,0 +1,627 @@ +package format_test + +import ( + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" +) + +//recursive struct + +type StringAlias string +type ByteAlias []byte +type IntAlias int + +type AStruct struct { + Exported string +} + +type SimpleStruct struct { + Name string + Enumeration int + Veritas bool + Data []byte + secret uint32 +} + +type ComplexStruct struct { + Strings []string + SimpleThings []*SimpleStruct + DataMaps map[int]ByteAlias +} + +type SecretiveStruct struct { + boolValue bool + intValue int + uintValue uint + uintptrValue uintptr + floatValue float32 + complexValue complex64 + chanValue chan bool + funcValue func() + pointerValue *int + sliceValue []string + byteSliceValue []byte + stringValue string + arrValue [3]int + byteArrValue [3]byte + mapValue map[string]int + structValue AStruct + interfaceValue interface{} +} + +type GoStringer struct { +} + +func (g GoStringer) GoString() string { + return "go-string" +} + +func (g GoStringer) String() string { + return "string" +} + +type Stringer struct { +} + +func (g Stringer) String() string { + return "string" +} + +type ctx struct { +} + +func (c *ctx) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (c *ctx) Done() <-chan struct{} { + return nil +} + +func (c *ctx) Err() error { + return nil +} + +func (c *ctx) Value(key interface{}) interface{} { + return nil +} + +var _ = Describe("Format", func() { + match := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { + if len(args) > 0 { + valueRepresentation = fmt.Sprintf(valueRepresentation, args...) + } + return Equal(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation)) + } + + matchRegexp := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { + if len(args) > 0 { + valueRepresentation = fmt.Sprintf(valueRepresentation, args...) 
+	}
+		return MatchRegexp(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation))
+	}
+
+	hashMatchingRegexp := func(entries ...string) string {
+		entriesSwitch := "(" + strings.Join(entries, "|") + ")"
+		arr := make([]string, len(entries))
+		for i := range arr {
+			arr[i] = entriesSwitch
+		}
+		return "{" + strings.Join(arr, ", ") + "}"
+	}
+
+	Describe("Message", func() {
+		Context("with only an actual value", func() {
+			It("should print out an indented formatted representation of the value and the message", func() {
+				Expect(Message(3, "to be three.")).Should(Equal("Expected\n    <int>: 3\nto be three."))
+			})
+		})
+
+		Context("with an actual and an expected value", func() {
+			It("should print out an indented formatted representation of both values, and the message", func() {
+				Expect(Message(3, "to equal", 4)).Should(Equal("Expected\n    <int>: 3\nto equal\n    <int>: 4"))
+			})
+		})
+	})
+
+	Describe("MessageWithDiff", func() {
+		It("shows the exact point where two long strings differ", func() {
+			stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+			stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+			Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedLongStringFailureMessage))
+		})
+
+		It("truncates the start of long strings that differ only at their end", func() {
+			stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
+			stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaz"
+
+			Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedTruncatedStartStringFailureMessage))
+		})
+
+		It("truncates the start of long strings that differ only in length", func() {
+			smallString := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+			largeString := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+			Expect(MessageWithDiff(largeString, "to equal", smallString)).Should(Equal(expectedTruncatedStartSizeFailureMessage))
+			Expect(MessageWithDiff(smallString, "to equal", largeString)).Should(Equal(expectedTruncatedStartSizeSwappedFailureMessage))
+		})
+
+		It("truncates the end of long strings that differ only at their start", func() {
+			stringWithB := "baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+			stringWithZ := "zaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+			Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedTruncatedEndStringFailureMessage))
+		})
+
+		It("handles multi-byte sequences correctly", func() {
+			stringA := "• abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz1"
+			stringB := "• abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+			Expect(MessageWithDiff(stringA, "to equal", stringB)).Should(Equal(expectedTruncatedMultiByteFailureMessage))
+		})
+
+		Context("With truncated diff disabled", func() {
+			BeforeEach(func() {
+				TruncatedDiff = false
+			})
+
+			AfterEach(func() {
+				TruncatedDiff = true
+			})
+
+			It("should show the full diff", func() {
+				stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+				stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+				Expect(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedFullFailureDiff))
+			})
+		})
+	})
+
+	Describe("IndentString", func() {
+		It("should indent the string", func() {
+			Expect(IndentString("foo\n
bar\nbaz", 2)).Should(Equal(" foo\n bar\n baz")) + }) + }) + + Describe("Object", func() { + Describe("formatting boolean values", func() { + It("should give the type and format values correctly", func() { + Expect(Object(true, 1)).Should(match("bool", "true")) + Expect(Object(false, 1)).Should(match("bool", "false")) + }) + }) + + Describe("formatting numbers", func() { + It("should give the type and format values correctly", func() { + Expect(Object(int(3), 1)).Should(match("int", "3")) + Expect(Object(int8(3), 1)).Should(match("int8", "3")) + Expect(Object(int16(3), 1)).Should(match("int16", "3")) + Expect(Object(int32(3), 1)).Should(match("int32", "3")) + Expect(Object(int64(3), 1)).Should(match("int64", "3")) + + Expect(Object(uint(3), 1)).Should(match("uint", "3")) + Expect(Object(uint8(3), 1)).Should(match("uint8", "3")) + Expect(Object(uint16(3), 1)).Should(match("uint16", "3")) + Expect(Object(uint32(3), 1)).Should(match("uint32", "3")) + Expect(Object(uint64(3), 1)).Should(match("uint64", "3")) + }) + + It("should handle uintptr differently", func() { + Expect(Object(uintptr(3), 1)).Should(match("uintptr", "0x3")) + }) + }) + + Describe("formatting channels", func() { + It("should give the type and format values correctly", func() { + c := make(chan<- bool, 3) + c <- true + c <- false + Expect(Object(c, 1)).Should(match("chan<- bool | len:2, cap:3", "%v", c)) + }) + }) + + Describe("formatting strings", func() { + It("should give the type and format values correctly", func() { + s := "a\nb\nc" + Expect(Object(s, 1)).Should(match("string", `a + b + c`)) + }) + }) + + Describe("formatting []byte slices", func() { + Context("when the slice is made of printable bytes", func() { + It("should present it as string", func() { + b := []byte("a b c") + Expect(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:5, cap:\d+`, `a b c`)) + }) + }) + Context("when the slice contains non-printable bytes", func() { + It("should present it as slice", func() { + b := []byte("a b c\n\x01\x02\x03\xff\x1bH") + Expect(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:12, cap:\d+`, `\[97, 32, 98, 32, 99, 10, 1, 2, 3, 255, 27, 72\]`)) + }) + }) + }) + + Describe("formatting functions", func() { + It("should give the type and format values correctly", func() { + f := func(a string, b []int) ([]byte, error) { + return []byte("abc"), nil + } + Expect(Object(f, 1)).Should(match("func(string, []int) ([]uint8, error)", "%v", f)) + }) + }) + + Describe("formatting pointers", func() { + It("should give the type and dereference the value to format it correctly", func() { + a := 3 + Expect(Object(&a, 1)).Should(match(fmt.Sprintf("*int | %p", &a), "3")) + }) + + Context("when there are pointers to pointers...", func() { + It("should recursively deference the pointer until it gets to a value", func() { + a := 3 + var b *int + var c **int + var d ***int + b = &a + c = &b + d = &c + + Expect(Object(d, 1)).Should(match(fmt.Sprintf("***int | %p", d), "3")) + }) + }) + + Context("when the pointer points to nil", func() { + It("should say nil and not explode", func() { + var a *AStruct + Expect(Object(a, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) + }) + }) + }) + + Describe("formatting arrays", func() { + It("should give the type and format values correctly", func() { + w := [3]string{"Jed Bartlet", "Toby Ziegler", "CJ Cregg"} + Expect(Object(w, 1)).Should(match("[3]string", `["Jed Bartlet", "Toby Ziegler", "CJ Cregg"]`)) + }) + + Context("with byte arrays", func() { + It("should give the type and format values 
correctly", func() { + w := [3]byte{17, 28, 19} + Expect(Object(w, 1)).Should(match("[3]uint8", `[17, 28, 19]`)) + }) + }) + }) + + Describe("formatting slices", func() { + It("should include the length and capacity in the type information", func() { + s := make([]bool, 3, 4) + Expect(Object(s, 1)).Should(match("[]bool | len:3, cap:4", "[false, false, false]")) + }) + + Context("when the slice contains long entries", func() { + It("should format the entries with newlines", func() { + w := []string{"Josiah Edward Bartlet", "Toby Ziegler", "CJ Cregg"} + expected := `[ + "Josiah Edward Bartlet", + "Toby Ziegler", + "CJ Cregg", + ]` + Expect(Object(w, 1)).Should(match("[]string | len:3, cap:3", expected)) + }) + }) + }) + + Describe("formatting maps", func() { + It("should include the length in the type information", func() { + m := make(map[int]bool, 5) + m[3] = true + m[4] = false + Expect(Object(m, 1)).Should(matchRegexp(`map\[int\]bool \| len:2`, hashMatchingRegexp("3: true", "4: false"))) + }) + + Context("when the slice contains long entries", func() { + It("should format the entries with newlines", func() { + m := map[string][]byte{} + m["Josiah Edward Bartlet"] = []byte("Martin Sheen") + m["Toby Ziegler"] = []byte("Richard Schiff") + m["CJ Cregg"] = []byte("Allison Janney") + expected := `{ + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + }` + Expect(Object(m, 1)).Should(matchRegexp(`map\[string\]\[\]uint8 \| len:3`, expected)) + }) + }) + }) + + Describe("formatting structs", func() { + It("should include the struct name and the field names", func() { + s := SimpleStruct{ + Name: "Oswald", + Enumeration: 17, + Veritas: true, + Data: []byte("datum"), + secret: 1983, + } + + Expect(Object(s, 1)).Should(match("format_test.SimpleStruct", `{Name: "Oswald", Enumeration: 17, Veritas: true, Data: "datum", secret: 1983}`)) + }) + + Context("when the struct contains long entries", func() { + It("should format the entries with new lines", func() { + s := &SimpleStruct{ + Name: "Mithrandir Gandalf Greyhame", + Enumeration: 2021, + Veritas: true, + Data: []byte("wizard"), + secret: 3, + } + + Expect(Object(s, 1)).Should(match(fmt.Sprintf("*format_test.SimpleStruct | %p", s), `{ + Name: "Mithrandir Gandalf Greyhame", + Enumeration: 2021, + Veritas: true, + Data: "wizard", + secret: 3, + }`)) + }) + }) + }) + + Describe("formatting nil values", func() { + It("should print out nil", func() { + Expect(Object(nil, 1)).Should(match("nil", "nil")) + var typedNil *AStruct + Expect(Object(typedNil, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) + var c chan<- bool + Expect(Object(c, 1)).Should(match("chan<- bool | len:0, cap:0", "nil")) + var s []string + Expect(Object(s, 1)).Should(match("[]string | len:0, cap:0", "nil")) + var m map[string]bool + Expect(Object(m, 1)).Should(match("map[string]bool | len:0", "nil")) + }) + }) + + Describe("formatting aliased types", func() { + It("should print out the correct alias type", func() { + Expect(Object(StringAlias("alias"), 1)).Should(match("format_test.StringAlias", `alias`)) + Expect(Object(ByteAlias("alias"), 1)).Should(matchRegexp(`format_test\.ByteAlias \| len:5, cap:\d+`, `alias`)) + Expect(Object(IntAlias(3), 1)).Should(match("format_test.IntAlias", "3")) + }) + }) + + 
Describe("handling nested things", func() { + It("should produce a correctly nested representation", func() { + s := ComplexStruct{ + Strings: []string{"lots", "of", "short", "strings"}, + SimpleThings: []*SimpleStruct{ + {"short", 7, true, []byte("succinct"), 17}, + {"something longer", 427, true, []byte("designed to wrap around nicely"), 30}, + }, + DataMaps: map[int]ByteAlias{ + 17: ByteAlias("some substantially longer chunks of data"), + 1138: ByteAlias("that should make things wrap"), + }, + } + expected := `{ + Strings: \["lots", "of", "short", "strings"\], + SimpleThings: \[ + {Name: "short", Enumeration: 7, Veritas: true, Data: "succinct", secret: 17}, + { + Name: "something longer", + Enumeration: 427, + Veritas: true, + Data: "designed to wrap around nicely", + secret: 30, + }, + \], + DataMaps: { + (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), + (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), + }, + }` + Expect(Object(s, 1)).Should(matchRegexp(`format_test\.ComplexStruct`, expected)) + }) + }) + + Describe("formatting times", func() { + It("should format time as RFC3339", func() { + t := time.Date(2016, 10, 31, 9, 57, 23, 12345, time.UTC) + Expect(Object(t, 1)).Should(match("time.Time", `2016-10-31T09:57:23.000012345Z`)) + }) + }) + }) + + Describe("Handling unexported fields in structs", func() { + It("should handle all the various types correctly", func() { + a := int(5) + s := SecretiveStruct{ + boolValue: true, + intValue: 3, + uintValue: 4, + uintptrValue: 5, + floatValue: 6.0, + complexValue: complex(5.0, 3.0), + chanValue: make(chan bool, 2), + funcValue: func() {}, + pointerValue: &a, + sliceValue: []string{"string", "slice"}, + byteSliceValue: []byte("bytes"), + stringValue: "a string", + arrValue: [3]int{11, 12, 13}, + byteArrValue: [3]byte{17, 20, 32}, + mapValue: map[string]int{"a key": 20, "b key": 30}, + structValue: AStruct{"exported"}, + interfaceValue: map[string]int{"a key": 17}, + } + + expected := fmt.Sprintf(`{ + boolValue: true, + intValue: 3, + uintValue: 4, + uintptrValue: 0x5, + floatValue: 6, + complexValue: \(5\+3i\), + chanValue: %p, + funcValue: %p, + pointerValue: 5, + sliceValue: \["string", "slice"\], + byteSliceValue: "bytes", + stringValue: "a string", + arrValue: \[11, 12, 13\], + byteArrValue: \[17, 20, 32\], + mapValue: %s, + structValue: {Exported: "exported"}, + interfaceValue: {"a key": 17}, + }`, s.chanValue, s.funcValue, hashMatchingRegexp(`"a key": 20`, `"b key": 30`)) + + Expect(Object(s, 1)).Should(matchRegexp(`format_test\.SecretiveStruct`, expected)) + }) + }) + + Describe("Handling interfaces", func() { + It("should unpack the interface", func() { + outerHash := map[string]interface{}{} + innerHash := map[string]int{} + + innerHash["inner"] = 3 + outerHash["integer"] = 2 + outerHash["map"] = innerHash + + expected := hashMatchingRegexp(`"integer": 2`, `"map": {"inner": 3}`) + Expect(Object(outerHash, 1)).Should(matchRegexp(`map\[string\]interface {} \| len:2`, expected)) + }) + }) + + Describe("Handling recursive things", func() { + It("should not go crazy...", func() { + m := map[string]interface{}{} + m["integer"] = 2 + m["map"] = m + Expect(Object(m, 1)).Should(ContainSubstring("...")) + }) + + It("really should not go crazy...", func() { + type complexKey struct { + Value map[interface{}]int + } + + complexObject := complexKey{} + complexObject.Value = make(map[interface{}]int) + + complexObject.Value[&complexObject] = 2 + 
Expect(Object(complexObject, 1)).Should(ContainSubstring("..."))
+		})
+	})
+
+	Describe("When instructed to use the Stringer representation", func() {
+		BeforeEach(func() {
+			UseStringerRepresentation = true
+		})
+
+		AfterEach(func() {
+			UseStringerRepresentation = false
+		})
+
+		Context("when passed a GoStringer", func() {
+			It("should use what GoString() returns", func() {
+				Expect(Object(GoStringer{}, 1)).Should(ContainSubstring(": go-string"))
+			})
+		})
+
+		Context("when passed a stringer", func() {
+			It("should use what String() returns", func() {
+				Expect(Object(Stringer{}, 1)).Should(ContainSubstring(": string"))
+			})
+		})
+	})
+
+	Describe("Printing a context.Context field", func() {
+
+		type structWithContext struct {
+			Context Ctx
+			Value   string
+		}
+
+		context := ctx{}
+		objWithContext := structWithContext{Value: "some-value", Context: &context}
+
+		It("Suppresses the content by default", func() {
+			Expect(Object(objWithContext, 1)).Should(ContainSubstring("<suppressed context>"))
+		})
+
+		It("Doesn't suppress the context if it's the object being printed", func() {
+			Expect(Object(context, 1)).ShouldNot(MatchRegexp("^.*<suppressed context>.*$"))
+		})
+
+		Context("PrintContextObjects is set", func() {
+			BeforeEach(func() {
+				PrintContextObjects = true
+			})
+
+			AfterEach(func() {
+				PrintContextObjects = false
+			})
+
+			It("Prints the context", func() {
+				Expect(Object(objWithContext, 1)).ShouldNot(ContainSubstring("<suppressed context>"))
+			})
+		})
+	})
+})
+
+var expectedLongStringFailureMessage = strings.TrimSpace(`
+Expected
+    <string>: "...aaaaabaaaaa..."
+to equal |
+    <string>: "...aaaaazaaaaa..."
+`)
+var expectedTruncatedEndStringFailureMessage = strings.TrimSpace(`
+Expected
+    <string>: "baaaaa..."
+to equal |
+    <string>: "zaaaaa..."
+`)
+var expectedTruncatedStartStringFailureMessage = strings.TrimSpace(`
+Expected
+    <string>: "...aaaaab"
+to equal |
+    <string>: "...aaaaaz"
+`)
+var expectedTruncatedStartSizeFailureMessage = strings.TrimSpace(`
+Expected
+    <string>: "...aaaaaa"
+to equal |
+    <string>: "...aaaaa"
+`)
+var expectedTruncatedStartSizeSwappedFailureMessage = strings.TrimSpace(`
+Expected
+    <string>: "...aaaa"
+to equal |
+    <string>: "...aaaaa"
+`)
+var expectedTruncatedMultiByteFailureMessage = strings.TrimSpace(`
+Expected
+    <string>: "...tuvwxyz1"
+to equal |
+    <string>: "...tuvwxyz"
+`)
+
+var expectedFullFailureDiff = strings.TrimSpace(`
+Expected
+    <string>: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+to equal
+    <string>: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+`)
diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer.go b/vendor/github.com/onsi/gomega/gbytes/buffer.go
new file mode 100644
index 000000000..336086f4a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gbytes/buffer.go
@@ -0,0 +1,245 @@
+/*
+Package gbytes provides a buffer that supports incrementally detecting input.
+
+You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fast-forwards the buffer's read cursor to the end of that match.
+
+Subsequent matches against the buffer will only operate against data that appears *after* the read cursor.
+
+The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always
+access the entire buffer's contents with Contents().
+
+*/
+package gbytes
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"regexp"
+	"sync"
+	"time"
+)
+
+/*
+gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher.
+
+You should only use a gbytes.Buffer in test code.
It stores all writes in an in-memory buffer - behavior that is inappropriate for production code!
+*/
+type Buffer struct {
+	contents     []byte
+	readCursor   uint64
+	lock         *sync.Mutex
+	detectCloser chan interface{}
+	closed       bool
+}
+
+/*
+NewBuffer returns a new gbytes.Buffer
+*/
+func NewBuffer() *Buffer {
+	return &Buffer{
+		lock: &sync.Mutex{},
+	}
+}
+
+/*
+BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes
+*/
+func BufferWithBytes(bytes []byte) *Buffer {
+	return &Buffer{
+		lock:     &sync.Mutex{},
+		contents: bytes,
+	}
+}
+
+/*
+BufferReader returns a new gbytes.Buffer that wraps a reader. The reader's contents are read into
+the Buffer via io.Copy
+*/
+func BufferReader(reader io.Reader) *Buffer {
+	b := &Buffer{
+		lock: &sync.Mutex{},
+	}
+
+	go func() {
+		io.Copy(b, reader)
+		b.Close()
+	}()
+
+	return b
+}
+
+/*
+Write implements the io.Writer interface
+*/
+func (b *Buffer) Write(p []byte) (n int, err error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.closed {
+		return 0, errors.New("attempt to write to closed buffer")
+	}
+
+	b.contents = append(b.contents, p...)
+	return len(p), nil
+}
+
+/*
+Read implements the io.Reader interface. It advances the
+cursor as it reads.
+
+Returns an error if called after Close.
+*/
+func (b *Buffer) Read(d []byte) (int, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.closed {
+		return 0, errors.New("attempt to read from closed buffer")
+	}
+
+	if uint64(len(b.contents)) <= b.readCursor {
+		return 0, io.EOF
+	}
+
+	n := copy(d, b.contents[b.readCursor:])
+	b.readCursor += uint64(n)
+
+	return n, nil
+}
+
+/*
+Close signifies that the buffer will no longer be written to
+*/
+func (b *Buffer) Close() error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.closed = true
+
+	return nil
+}
+
+/*
+Closed returns true if the buffer has been closed
+*/
+func (b *Buffer) Closed() bool {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.closed
+}
+
+/*
+Contents returns all data ever written to the buffer.
+*/
+func (b *Buffer) Contents() []byte {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	contents := make([]byte, len(b.contents))
+	copy(contents, b.contents)
+	return contents
+}
+
+/*
+Detect takes a regular expression and returns a channel.
+
+The channel will receive true the first time data matching the regular expression is written to the buffer.
+The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region.
+
+You typically don't need to use Detect and should use the gbytes.Say matcher instead. Detect is useful, however, in cases where your code must
+branch and handle different outputs written to the buffer.
+
+For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library.
+
+You could do something like:
+
+select {
+case <-buffer.Detect("You are not logged in"):
+	//log in
+case <-buffer.Detect("Success"):
+	//carry on
+case <-time.After(time.Second):
+	//welp
+}
+buffer.CancelDetects()
+
+You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them.
+
+Finally, you can pass detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf.
+*/ +func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { + formattedRegexp := desired + if len(args) > 0 { + formattedRegexp = fmt.Sprintf(desired, args...) + } + re := regexp.MustCompile(formattedRegexp) + + b.lock.Lock() + defer b.lock.Unlock() + + if b.detectCloser == nil { + b.detectCloser = make(chan interface{}) + } + + closer := b.detectCloser + response := make(chan bool) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + defer close(response) + for { + select { + case <-ticker.C: + b.lock.Lock() + data, cursor := b.contents[b.readCursor:], b.readCursor + loc := re.FindIndex(data) + b.lock.Unlock() + + if loc != nil { + response <- true + b.lock.Lock() + newCursorPosition := cursor + uint64(loc[1]) + if newCursorPosition >= b.readCursor { + b.readCursor = newCursorPosition + } + b.lock.Unlock() + return + } + case <-closer: + return + } + } + }() + + return response +} + +/* +CancelDetects cancels any pending detects and cleans up their goroutines. You should always call this when you're done with a set of Detect channels. +*/ +func (b *Buffer) CancelDetects() { + b.lock.Lock() + defer b.lock.Unlock() + + close(b.detectCloser) + b.detectCloser = nil +} + +func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) { + b.lock.Lock() + defer b.lock.Unlock() + + unreadBytes := b.contents[b.readCursor:] + copyOfUnreadBytes := make([]byte, len(unreadBytes)) + copy(copyOfUnreadBytes, unreadBytes) + + loc := re.FindIndex(unreadBytes) + + if loc != nil { + b.readCursor += uint64(loc[1]) + return true, copyOfUnreadBytes + } + return false, copyOfUnreadBytes +} diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer_test.go b/vendor/github.com/onsi/gomega/gbytes/buffer_test.go new file mode 100644 index 000000000..9d4e8279d --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/buffer_test.go @@ -0,0 +1,205 @@ +package gbytes_test + +import ( + "io" + "time" + + . "github.com/onsi/gomega/gbytes" + + "bytes" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +type SlowReader struct { + R io.Reader + D time.Duration +} + +func (s SlowReader) Read(p []byte) (int, error) { + time.Sleep(s.D) + return s.R.Read(p) +} + +var _ = Describe("Buffer", func() { + var buffer *Buffer + + BeforeEach(func() { + buffer = NewBuffer() + }) + + Describe("dumping the entire contents of the buffer", func() { + It("should return everything that's been written", func() { + buffer.Write([]byte("abc")) + buffer.Write([]byte("def")) + Expect(buffer.Contents()).Should(Equal([]byte("abcdef"))) + + Expect(buffer).Should(Say("bcd")) + Expect(buffer.Contents()).Should(Equal([]byte("abcdef"))) + }) + }) + + Describe("creating a buffer with bytes", func() { + It("should create the buffer with the cursor set to the beginning", func() { + buffer := BufferWithBytes([]byte("abcdef")) + Expect(buffer.Contents()).Should(Equal([]byte("abcdef"))) + Expect(buffer).Should(Say("abc")) + Expect(buffer).ShouldNot(Say("abc")) + Expect(buffer).Should(Say("def")) + }) + }) + + Describe("creating a buffer that wraps a reader", func() { + Context("for a well-behaved reader", func() { + It("should buffer the contents of the reader", func() { + reader := bytes.NewBuffer([]byte("abcdef")) + buffer := BufferReader(reader) + Eventually(buffer).Should(Say("abc")) + Expect(buffer).ShouldNot(Say("abc")) + Eventually(buffer).Should(Say("def")) + Eventually(buffer.Closed).Should(BeTrue()) + }) + }) + + Context("for a slow reader", func() { + It("should allow Eventually to time out", func() { + slowReader := SlowReader{ + R: bytes.NewBuffer([]byte("abcdef")), + D: time.Second, + } + buffer := BufferReader(slowReader) + failures := InterceptGomegaFailures(func() { + Eventually(buffer, 100*time.Millisecond).Should(Say("abc")) + }) + Expect(failures).ShouldNot(BeEmpty()) + + fastReader := SlowReader{ + R: bytes.NewBuffer([]byte("abcdef")), + D: time.Millisecond, + } + buffer = BufferReader(fastReader) + Eventually(buffer, 100*time.Millisecond).Should(Say("abc")) + Eventually(buffer.Closed).Should(BeTrue()) + }) + }) + }) + + Describe("reading from a buffer", func() { + It("should read the current contents of the buffer", func() { + buffer := BufferWithBytes([]byte("abcde")) + + dest := make([]byte, 3) + n, err := buffer.Read(dest) + Expect(err).ShouldNot(HaveOccurred()) + Expect(n).Should(Equal(3)) + Expect(string(dest)).Should(Equal("abc")) + + dest = make([]byte, 3) + n, err = buffer.Read(dest) + Expect(err).ShouldNot(HaveOccurred()) + Expect(n).Should(Equal(2)) + Expect(string(dest[:n])).Should(Equal("de")) + + n, err = buffer.Read(dest) + Expect(err).Should(Equal(io.EOF)) + Expect(n).Should(Equal(0)) + }) + + Context("after the buffer has been closed", func() { + It("returns an error", func() { + buffer := BufferWithBytes([]byte("abcde")) + + buffer.Close() + + dest := make([]byte, 3) + n, err := buffer.Read(dest) + Expect(err).Should(HaveOccurred()) + Expect(n).Should(Equal(0)) + }) + }) + }) + + Describe("detecting regular expressions", func() { + It("should fire the appropriate channel when the passed in pattern matches, then close it", func(done Done) { + go func() { + time.Sleep(10 * time.Millisecond) + buffer.Write([]byte("abcde")) + }() + + A := buffer.Detect("%s", "a.c") + B := buffer.Detect("def") + + var gotIt bool + select { + case gotIt = <-A: + case <-B: + Fail("should not have gotten here") + } + + Expect(gotIt).Should(BeTrue()) + Eventually(A).Should(BeClosed()) + + buffer.Write([]byte("f")) + Eventually(B).Should(Receive()) + Eventually(B).Should(BeClosed()) 
+
+			close(done)
+		})
+
+		It("should fast-forward the buffer upon detection", func(done Done) {
+			buffer.Write([]byte("abcde"))
+			<-buffer.Detect("abc")
+			Expect(buffer).ShouldNot(Say("abc"))
+			Expect(buffer).Should(Say("de"))
+			close(done)
+		})
+
+		It("should only fast-forward the buffer when the channel is read, and only if doing so would not rewind it", func(done Done) {
+			buffer.Write([]byte("abcde"))
+			A := buffer.Detect("abc")
+			time.Sleep(20 * time.Millisecond) // give the goroutine a chance to detect and write to the channel
+			Expect(buffer).Should(Say("abcd"))
+			<-A
+			Expect(buffer).ShouldNot(Say("d"))
+			Expect(buffer).Should(Say("e"))
+			Eventually(A).Should(BeClosed())
+			close(done)
+		})
+
+		It("should be possible to cancel a detection", func(done Done) {
+			A := buffer.Detect("abc")
+			B := buffer.Detect("def")
+			buffer.CancelDetects()
+			buffer.Write([]byte("abcdef"))
+			Eventually(A).Should(BeClosed())
+			Eventually(B).Should(BeClosed())
+
+			Expect(buffer).Should(Say("bcde"))
+			<-buffer.Detect("f")
+			close(done)
+		})
+	})
+
+	Describe("closing the buffer", func() {
+		It("should error when further write attempts are made", func() {
+			_, err := buffer.Write([]byte("abc"))
+			Expect(err).ShouldNot(HaveOccurred())
+
+			buffer.Close()
+
+			_, err = buffer.Write([]byte("def"))
+			Expect(err).Should(HaveOccurred())
+
+			Expect(buffer.Contents()).Should(Equal([]byte("abc")))
+		})
+
+		It("should be closed", func() {
+			Expect(buffer.Closed()).Should(BeFalse())
+
+			buffer.Close()
+
+			Expect(buffer.Closed()).Should(BeTrue())
+		})
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go b/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go
new file mode 100644
index 000000000..3a7dc0612
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go
@@ -0,0 +1,13 @@
+package gbytes_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"testing"
+)
+
+func TestGbytes(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Gbytes Suite")
+}
diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go
new file mode 100644
index 000000000..3caed8769
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go
@@ -0,0 +1,85 @@
+package gbytes
+
+import (
+	"errors"
+	"io"
+	"time"
+)
+
+// ErrTimeout is returned by TimeoutCloser, TimeoutReader, and TimeoutWriter when the underlying Closer/Reader/Writer does not return within the specified timeout
+var ErrTimeout = errors.New("timeout occurred")
+
+// TimeoutCloser returns an io.Closer that wraps the passed-in io.Closer. If the underlying Closer fails to close within the allotted timeout ErrTimeout is returned.
+func TimeoutCloser(c io.Closer, timeout time.Duration) io.Closer {
+	return timeoutReaderWriterCloser{c: c, d: timeout}
+}
+
+// TimeoutReader returns an io.Reader that wraps the passed-in io.Reader. If the underlying Reader fails to read within the allotted timeout ErrTimeout is returned.
+func TimeoutReader(r io.Reader, timeout time.Duration) io.Reader {
+	return timeoutReaderWriterCloser{r: r, d: timeout}
+}
+
+// TimeoutWriter returns an io.Writer that wraps the passed-in io.Writer. If the underlying Writer fails to write within the allotted timeout ErrTimeout is returned.
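A brief usage sketch for the three wrappers documented above: wrapping a silent reader so the Read fails fast with ErrTimeout instead of hanging a test. os.Stdin here is just a convenient blocking reader for illustration:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/onsi/gomega/gbytes"
)

func main() {
	// A Read that takes longer than 50ms returns gbytes.ErrTimeout.
	r := gbytes.TimeoutReader(os.Stdin, 50*time.Millisecond)

	p := make([]byte, 16)
	if _, err := r.Read(p); err == gbytes.ErrTimeout {
		fmt.Println("read timed out")
	}
}
```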
+func TimeoutWriter(w io.Writer, timeout time.Duration) io.Writer { + return timeoutReaderWriterCloser{w: w, d: timeout} +} + +type timeoutReaderWriterCloser struct { + c io.Closer + w io.Writer + r io.Reader + d time.Duration +} + +func (t timeoutReaderWriterCloser) Close() error { + done := make(chan struct{}) + var err error + + go func() { + err = t.c.Close() + close(done) + }() + + select { + case <-done: + return err + case <-time.After(t.d): + return ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Read(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.r.Read(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Write(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.w.Write(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go new file mode 100644 index 000000000..3da973498 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go @@ -0,0 +1,188 @@ +package gbytes_test + +import ( + "fmt" + "io" + "time" + + . "github.com/onsi/gomega/gbytes" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type FakeCloser struct { + err error + duration time.Duration +} + +func (f FakeCloser) Close() error { + time.Sleep(f.duration) + return f.err +} + +type FakeReader struct { + err error + duration time.Duration +} + +func (f FakeReader) Read(p []byte) (int, error) { + time.Sleep(f.duration) + if f.err != nil { + return 0, f.err + } + + for i := 0; i < len(p); i++ { + p[i] = 'a' + } + + return len(p), nil +} + +type FakeWriter struct { + err error + duration time.Duration +} + +func (f FakeWriter) Write(p []byte) (int, error) { + time.Sleep(f.duration) + if f.err != nil { + return 0, f.err + } + + return len(p), nil +} + +var _ = Describe("Io Wrappers", func() { + Describe("TimeoutCloser", func() { + var innerCloser io.Closer + var timeoutCloser io.Closer + + JustBeforeEach(func() { + timeoutCloser = TimeoutCloser(innerCloser, 20*time.Millisecond) + }) + + Context("when the underlying Closer closes with no error", func() { + BeforeEach(func() { + innerCloser = FakeCloser{} + }) + + It("returns with no error", func() { + Expect(timeoutCloser.Close()).Should(Succeed()) + }) + }) + + Context("when the underlying Closer closes with an error", func() { + BeforeEach(func() { + innerCloser = FakeCloser{err: fmt.Errorf("boom")} + }) + + It("returns the error", func() { + Expect(timeoutCloser.Close()).Should(MatchError("boom")) + }) + }) + + Context("when the underlying Closer hangs", func() { + BeforeEach(func() { + innerCloser = FakeCloser{ + err: fmt.Errorf("boom"), + duration: time.Hour, + } + }) + + It("returns ErrTimeout", func() { + Expect(timeoutCloser.Close()).Should(MatchError(ErrTimeout)) + }) + }) + }) + + Describe("TimeoutReader", func() { + var innerReader io.Reader + var timeoutReader io.Reader + + JustBeforeEach(func() { + timeoutReader = TimeoutReader(innerReader, 20*time.Millisecond) + }) + + Context("when the underlying Reader returns no error", func() { + BeforeEach(func() { + innerReader = FakeReader{} + }) + + It("returns with no error", func() { + p := make([]byte, 5) + n, err := timeoutReader.Read(p) + 
Expect(n).Should(Equal(5))
+				Expect(err).ShouldNot(HaveOccurred())
+				Expect(p).Should(Equal([]byte("aaaaa")))
+			})
+		})
+
+		Context("when the underlying Reader returns an error", func() {
+			BeforeEach(func() {
+				innerReader = FakeReader{err: fmt.Errorf("boom")}
+			})
+
+			It("returns the error", func() {
+				p := make([]byte, 5)
+				_, err := timeoutReader.Read(p)
+				Expect(err).Should(MatchError("boom"))
+			})
+		})
+
+		Context("when the underlying Reader hangs", func() {
+			BeforeEach(func() {
+				innerReader = FakeReader{err: fmt.Errorf("boom"), duration: time.Hour}
+			})
+
+			It("returns ErrTimeout", func() {
+				p := make([]byte, 5)
+				_, err := timeoutReader.Read(p)
+				Expect(err).Should(MatchError(ErrTimeout))
+			})
+		})
+	})
+
+	Describe("TimeoutWriter", func() {
+		var innerWriter io.Writer
+		var timeoutWriter io.Writer
+
+		JustBeforeEach(func() {
+			timeoutWriter = TimeoutWriter(innerWriter, 20*time.Millisecond)
+		})
+
+		Context("when the underlying Writer returns no error", func() {
+			BeforeEach(func() {
+				innerWriter = FakeWriter{}
+			})
+
+			It("returns with no error", func() {
+				n, err := timeoutWriter.Write([]byte("aaaaa"))
+				Expect(n).Should(Equal(5))
+				Expect(err).ShouldNot(HaveOccurred())
+			})
+		})
+
+		Context("when the underlying Writer returns an error", func() {
+			BeforeEach(func() {
+				innerWriter = FakeWriter{err: fmt.Errorf("boom")}
+			})
+
+			It("returns the error", func() {
+				_, err := timeoutWriter.Write([]byte("aaaaa"))
+				Expect(err).Should(MatchError("boom"))
+			})
+		})
+
+		Context("when the underlying Writer hangs", func() {
+			BeforeEach(func() {
+				innerWriter = FakeWriter{err: fmt.Errorf("boom"), duration: time.Hour}
+			})
+
+			It("returns ErrTimeout", func() {
+				_, err := timeoutWriter.Write([]byte("aaaaa"))
+				Expect(err).Should(MatchError(ErrTimeout))
+			})
+		})
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go
new file mode 100644
index 000000000..772142046
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go
@@ -0,0 +1,105 @@
+package gbytes
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/onsi/gomega/format"
+)
+
+// Objects satisfying the BufferProvider can be used with the Say matcher.
+type BufferProvider interface {
+	Buffer() *Buffer
+}
+
+/*
+Say is a Gomega matcher that operates on gbytes.Buffers:
+
+	Expect(buffer).Should(Say("something"))
+
+will succeed if the unread portion of the buffer matches the regular expression "something".
+
+When Say succeeds, it fast-forwards the gbytes.Buffer's read cursor to just after the successful match.
+Thus, subsequent calls to Say will only match against the unread portion of the buffer.
+
+Say pairs very well with Eventually. To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can:
+
+	Eventually(buffer, 3).Should(Say("[123]-star"))
+
+Ditto with Consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can:
+
+	Consistently(buffer, 1).ShouldNot(Say("never-see-this"))
+
+In addition to gbytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface.
+In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer()
+
+If the buffer is closed, the Say matcher will tell Eventually to abort.
+*/
+func Say(expected string, args ...interface{}) *sayMatcher {
+	formattedRegexp := expected
+	if len(args) > 0 {
+		formattedRegexp = fmt.Sprintf(expected, args...)
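Since Say accepts any gbytes.BufferProvider (see the interface above), a session-like type can be matched directly. A sketch in a plain go test, using a hypothetical worker type:

```go
package worker_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"
)

// worker is a hypothetical type satisfying gbytes.BufferProvider.
type worker struct{ out *gbytes.Buffer }

func (w *worker) Buffer() *gbytes.Buffer { return w.out }

func TestWorkerSaysReady(t *testing.T) {
	g := NewGomegaWithT(t)

	w := &worker{out: gbytes.NewBuffer()}
	fmt.Fprintln(w.out, "ready")

	// Say resolves the provider's buffer and matches its unread output.
	g.Eventually(w).Should(gbytes.Say("ready"))
}
```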
+ } + return &sayMatcher{ + re: regexp.MustCompile(formattedRegexp), + } +} + +type sayMatcher struct { + re *regexp.Regexp + receivedSayings []byte +} + +func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { + var buffer *Buffer + + switch x := actual.(type) { + case *Buffer: + buffer = x + case BufferProvider: + buffer = x.Buffer() + default: + return nil, false + } + + return buffer, true +} + +func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { + buffer, ok := m.buffer(actual) + if !ok { + return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. Got:\n%s", format.Object(actual, 1)) + } + + didSay, sayings := buffer.didSay(m.re) + m.receivedSayings = sayings + + return didSay, nil +} + +func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Got stuck at:\n%s\nWaiting for:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Saw:\n%s\nWhich matches the unexpected:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + switch x := actual.(type) { + case *Buffer: + return !x.Closed() + case BufferProvider: + return !x.Buffer().Closed() + default: + return true + } +} diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go new file mode 100644 index 000000000..ef8f784f9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go @@ -0,0 +1,164 @@ +package gbytes_test + +import ( + "time" + + . "github.com/onsi/gomega/gbytes" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +type speaker struct { + buffer *Buffer +} + +func (s *speaker) Buffer() *Buffer { + return s.buffer +} + +var _ = Describe("SayMatcher", func() { + var buffer *Buffer + + BeforeEach(func() { + buffer = NewBuffer() + buffer.Write([]byte("abc")) + }) + + Context("when actual is not a gexec Buffer, or a BufferProvider", func() { + It("should error", func() { + failures := InterceptGomegaFailures(func() { + Expect("foo").Should(Say("foo")) + }) + Expect(failures[0]).Should(ContainSubstring("*gbytes.Buffer")) + }) + }) + + Context("when a match is found", func() { + It("should succeed", func() { + Expect(buffer).Should(Say("abc")) + }) + + It("should support printf-like formatting", func() { + Expect(buffer).Should(Say("a%sc", "b")) + }) + + It("should use a regular expression", func() { + Expect(buffer).Should(Say("a.c")) + }) + + It("should fastforward the buffer", func() { + buffer.Write([]byte("def")) + Expect(buffer).Should(Say("abcd")) + Expect(buffer).Should(Say("ef")) + Expect(buffer).ShouldNot(Say("[a-z]")) + }) + }) + + Context("when no match is found", func() { + It("should not error", func() { + Expect(buffer).ShouldNot(Say("def")) + }) + + Context("when the buffer is closed", func() { + BeforeEach(func() { + buffer.Close() + }) + + It("should abort an eventually", func() { + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(buffer).Should(Say("def")) + }) + Eventually(buffer).ShouldNot(Say("def")) + Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + Expect(failures).Should(HaveLen(1)) + + t = time.Now() + Eventually(buffer).Should(Say("abc")) + Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + }) + + It("should abort a consistently", func() { + t := time.Now() + Consistently(buffer, 2.0).ShouldNot(Say("def")) + Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + }) + + It("should not error with a synchronous matcher", func() { + Expect(buffer).ShouldNot(Say("def")) + Expect(buffer).Should(Say("abc")) + }) + }) + }) + + Context("when a positive match fails", func() { + It("should report where it got stuck", func() { + Expect(buffer).Should(Say("abc")) + buffer.Write([]byte("def")) + failures := InterceptGomegaFailures(func() { + Expect(buffer).Should(Say("abc")) + }) + Expect(failures[0]).Should(ContainSubstring("Got stuck at:")) + Expect(failures[0]).Should(ContainSubstring("def")) + }) + }) + + Context("when a negative match fails", func() { + It("should report where it got stuck", func() { + failures := InterceptGomegaFailures(func() { + Expect(buffer).ShouldNot(Say("abc")) + }) + Expect(failures[0]).Should(ContainSubstring("Saw:")) + Expect(failures[0]).Should(ContainSubstring("Which matches the unexpected:")) + Expect(failures[0]).Should(ContainSubstring("abc")) + }) + }) + + Context("when a match is not found", func() { + It("should not fastforward the buffer", func() { + Expect(buffer).ShouldNot(Say("def")) + Expect(buffer).Should(Say("abc")) + }) + }) + + Context("a nice real-life example", func() { + It("should behave well", func() { + Expect(buffer).Should(Say("abc")) + go func() { + time.Sleep(10 * time.Millisecond) + buffer.Write([]byte("def")) + }() + Expect(buffer).ShouldNot(Say("def")) + Eventually(buffer).Should(Say("def")) + }) + }) + + Context("when actual is a BufferProvider", func() { + It("should use actual's buffer", func() { + s := &speaker{ + buffer: NewBuffer(), + } + + Expect(s).ShouldNot(Say("abc")) + + s.Buffer().Write([]byte("abc")) 
+ Expect(s).Should(Say("abc")) + }) + + It("should abort an eventually", func() { + s := &speaker{ + buffer: NewBuffer(), + } + + s.buffer.Close() + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(s).Should(Say("def")) + }) + Expect(failures).Should(HaveLen(1)) + Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go b/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go new file mode 100644 index 000000000..16091c22b --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "math/rand" + "os" + "strconv" + "time" +) + +var outQuote = "We've done the impossible, and that makes us mighty." +var errQuote = "Ah, curse your sudden but inevitable betrayal!" + +var randomQuotes = []string{ + "Can we maybe vote on the whole murdering people issue?", + "I swear by my pretty floral bonnet, I will end you.", + "My work's illegal, but at least it's honest.", +} + +func main() { + fmt.Fprintln(os.Stdout, outQuote) + fmt.Fprintln(os.Stderr, errQuote) + + randomIndex := rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(randomQuotes)) + + time.Sleep(100 * time.Millisecond) + + fmt.Fprintln(os.Stdout, randomQuotes[randomIndex]) + + if len(os.Args) == 2 { + exitCode, _ := strconv.Atoi(os.Args[1]) + os.Exit(exitCode) + } else { + os.Exit(randomIndex) + } +} diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go new file mode 100644 index 000000000..869c1ead8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/build.go @@ -0,0 +1,112 @@ +package gexec + +import ( + "errors" + "fmt" + "go/build" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + "sync" +) + +var ( + mu sync.Mutex + tmpDir string +) + +/* +Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. +A path pointing to this binary is returned. + +Build uses the $GOPATH set in your environment. If $GOPATH is not set and you are using Go 1.8+, +it will use the default GOPATH instead. It passes the variadic args on to `go build`. +*/ +func Build(packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(build.Default.GOPATH, packagePath, nil, args...) +} + +/* +BuildWithEnvironment is identical to Build but allows you to specify env vars to be set at build time. +*/ +func BuildWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) { + return doBuild(build.Default.GOPATH, packagePath, env, args...) +} + +/* +BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). +*/ +func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(gopath, packagePath, nil, args...) 
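Putting Build together with the rest of the package: a typical flow compiles a binary once, starts it via gexec.Start, and inspects the resulting Session. A minimal sketch; the package path is a placeholder:

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/onsi/gomega/gexec"
)

func main() {
	// Compile the package; the returned path can be handed to exec.Command.
	bin, err := gexec.Build("./cmd/mytool") // hypothetical package path
	if err != nil {
		panic(err)
	}
	defer gexec.CleanupBuildArtifacts()

	session, err := gexec.Start(exec.Command(bin), nil, nil)
	if err != nil {
		panic(err)
	}

	session.Wait() // block until the process exits
	fmt.Println("exit code:", session.ExitCode())
}
```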
+} + +func replaceGoPath(environ []string, newGoPath string) []string { + newEnviron := []string{} + for _, v := range environ { + if !strings.HasPrefix(v, "GOPATH=") { + newEnviron = append(newEnviron, v) + } + } + return append(newEnviron, "GOPATH="+newGoPath) +} + +func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) { + tmpDir, err := temporaryDirectory() + if err != nil { + return "", err + } + + if len(gopath) == 0 { + return "", errors.New("$GOPATH not provided when building " + packagePath) + } + + executable := filepath.Join(tmpDir, path.Base(packagePath)) + if runtime.GOOS == "windows" { + executable = executable + ".exe" + } + + cmdArgs := append([]string{"build"}, args...) + cmdArgs = append(cmdArgs, "-o", executable, packagePath) + + build := exec.Command("go", cmdArgs...) + build.Env = replaceGoPath(os.Environ(), gopath) + build.Env = append(build.Env, env...) + + output, err := build.CombinedOutput() + if err != nil { + return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) + } + + return executable, nil +} + +/* +You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by +gexec. In Ginkgo this is typically done in an AfterSuite callback. +*/ +func CleanupBuildArtifacts() { + mu.Lock() + defer mu.Unlock() + if tmpDir != "" { + os.RemoveAll(tmpDir) + tmpDir = "" + } +} + +func temporaryDirectory() (string, error) { + var err error + mu.Lock() + defer mu.Unlock() + if tmpDir == "" { + tmpDir, err = ioutil.TempDir("", "gexec_artifacts") + if err != nil { + return "", err + } + } + + return ioutil.TempDir(tmpDir, "g") +} diff --git a/vendor/github.com/onsi/gomega/gexec/build_test.go b/vendor/github.com/onsi/gomega/gexec/build_test.go new file mode 100644 index 000000000..87c623cb5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/build_test.go @@ -0,0 +1,87 @@ +package gexec_test + +import ( + "os" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +var packagePath = "./_fixture/firefly" + +var _ = Describe(".Build", func() { + Context("when there have been previous calls to Build", func() { + BeforeEach(func() { + _, err := gexec.Build(packagePath) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("compiles the specified package", func() { + compiledPath, err := gexec.Build(packagePath) + Expect(err).ShouldNot(HaveOccurred()) + Expect(compiledPath).Should(BeAnExistingFile()) + }) + + Context("and CleanupBuildArtifacts has been called", func() { + BeforeEach(func() { + gexec.CleanupBuildArtifacts() + }) + + It("compiles the specified package", func() { + var err error + fireflyPath, err = gexec.Build(packagePath) + Expect(err).ShouldNot(HaveOccurred()) + Expect(fireflyPath).Should(BeAnExistingFile()) + }) + }) + }) +}) + +var _ = Describe(".BuildWithEnvironment", func() { + var err error + env := []string{ + "GOOS=linux", + "GOARCH=amd64", + } + + It("compiles the specified package with the specified env vars", func() { + compiledPath, err := gexec.BuildWithEnvironment(packagePath, env) + Expect(err).ShouldNot(HaveOccurred()) + Expect(compiledPath).Should(BeAnExistingFile()) + }) + + It("returns the environment to a good state", func() { + _, err = gexec.BuildWithEnvironment(packagePath, env) + Expect(err).ShouldNot(HaveOccurred()) + Expect(os.Environ()).ShouldNot(ContainElement("GOOS=linux")) + }) +}) + +var _ = Describe(".BuildIn", func() { + var ( + gopath string + ) + + BeforeEach(func() { + gopath = os.Getenv("GOPATH") + Expect(gopath).NotTo(BeEmpty()) + Expect(os.Setenv("GOPATH", "/tmp")).To(Succeed()) + Expect(os.Environ()).To(ContainElement("GOPATH=/tmp")) + }) + + AfterEach(func() { + Expect(os.Setenv("GOPATH", gopath)).To(Succeed()) + }) + + It("appends the gopath env var", func() { + _, err := gexec.BuildIn(gopath, "github.com/onsi/gomega/gexec/_fixture/firefly/") + Expect(err).NotTo(HaveOccurred()) + }) + + It("resets GOPATH to its original value", func() { + _, err := gexec.BuildIn(gopath, "github.com/onsi/gomega/gexec/_fixture/firefly/") + Expect(err).NotTo(HaveOccurred()) + Expect(os.Getenv("GOPATH")).To(Equal("/tmp")) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go new file mode 100644 index 000000000..98a354937 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go @@ -0,0 +1,86 @@ +package gexec + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +/* +The Exit matcher operates on a session: + + Expect(session).Should(Exit()) + +Exit passes if the session has already exited. + +If no status code is provided, then Exit will succeed if the session has exited regardless of exit code. +Otherwise, Exit will only succeed if the process has exited with the provided status code. + +Note that the process must have already exited. 
To wait for a process to exit, use Eventually: + + Eventually(session, 3).Should(Exit(0)) +*/ +func Exit(optionalExitCode ...int) *exitMatcher { + exitCode := -1 + if len(optionalExitCode) > 0 { + exitCode = optionalExitCode[0] + } + + return &exitMatcher{ + exitCode: exitCode, + } +} + +type exitMatcher struct { + exitCode int + didExit bool + actualExitCode int +} + +type Exiter interface { + ExitCode() int +} + +func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { + exiter, ok := actual.(Exiter) + if !ok { + return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) + } + + m.actualExitCode = exiter.ExitCode() + + if m.actualExitCode == -1 { + return false, nil + } + + if m.exitCode == -1 { + return true, nil + } + return m.exitCode == m.actualExitCode, nil +} + +func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "Expected process to exit. It did not." + } + return format.Message(m.actualExitCode, "to match exit code:", m.exitCode) +} + +func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "you really shouldn't be able to see this!" + } else { + if m.exitCode == -1 { + return "Expected process not to exit. It did." + } + return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode) + } +} + +func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + session, ok := actual.(*Session) + if ok { + return session.ExitCode() == -1 + } + return true +} diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go new file mode 100644 index 000000000..9abc3226f --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go @@ -0,0 +1,114 @@ +package gexec_test + +import ( + "os/exec" + "time" + + . "github.com/onsi/gomega/gexec" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type NeverExits struct{} + +func (e NeverExits) ExitCode() int { + return -1 +} + +var _ = Describe("ExitMatcher", func() { + var command *exec.Cmd + var session *Session + + BeforeEach(func() { + var err error + command = exec.Command(fireflyPath, "0") + session, err = Start(command, nil, nil) + Expect(err).ShouldNot(HaveOccurred()) + }) + + Describe("when passed something that is an Exiter", func() { + It("should act normally", func() { + failures := InterceptGomegaFailures(func() { + Expect(NeverExits{}).Should(Exit()) + }) + + Expect(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) + }) + }) + + Describe("when passed something that is not an Exiter", func() { + It("should error", func() { + failures := InterceptGomegaFailures(func() { + Expect("aardvark").Should(Exit()) + }) + + Expect(failures[0]).Should(ContainSubstring("Exit must be passed a gexec.Exiter")) + }) + }) + + Context("with no exit code", func() { + It("should say the right things when it fails", func() { + Expect(session).ShouldNot(Exit()) + + failures := InterceptGomegaFailures(func() { + Expect(session).Should(Exit()) + }) + + Expect(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) + + Eventually(session).Should(Exit()) + + Expect(session).Should(Exit()) + + failures = InterceptGomegaFailures(func() { + Expect(session).ShouldNot(Exit()) + }) + + Expect(failures[0]).Should(ContainSubstring("Expected process not to exit. 
It did.")) + }) + }) + + Context("with an exit code", func() { + It("should say the right things when it fails", func() { + Expect(session).ShouldNot(Exit(0)) + Expect(session).ShouldNot(Exit(1)) + + failures := InterceptGomegaFailures(func() { + Expect(session).Should(Exit(0)) + }) + + Expect(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) + + Eventually(session).Should(Exit(0)) + + Expect(session).Should(Exit(0)) + + failures = InterceptGomegaFailures(func() { + Expect(session).Should(Exit(1)) + }) + + Expect(failures[0]).Should(ContainSubstring("to match exit code:")) + + Expect(session).ShouldNot(Exit(1)) + + failures = InterceptGomegaFailures(func() { + Expect(session).ShouldNot(Exit(0)) + }) + + Expect(failures[0]).Should(ContainSubstring("not to match exit code:")) + }) + }) + + Describe("bailing out early", func() { + It("should bail out early once the process exits", func() { + t := time.Now() + + failures := InterceptGomegaFailures(func() { + Eventually(session).Should(Exit(1)) + }) + Expect(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond)) + Expect(failures).Should(HaveLen(1)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go b/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go new file mode 100644 index 000000000..dc8e1f40c --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go @@ -0,0 +1,26 @@ +package gexec_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "testing" +) + +var fireflyPath string + +func TestGexec(t *testing.T) { + BeforeSuite(func() { + var err error + fireflyPath, err = gexec.Build("./_fixture/firefly") + Expect(err).ShouldNot(HaveOccurred()) + }) + + AfterSuite(func() { + gexec.CleanupBuildArtifacts() + }) + + RegisterFailHandler(Fail) + RunSpecs(t, "Gexec Suite") +} diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go new file mode 100644 index 000000000..05e695abc --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go @@ -0,0 +1,53 @@ +package gexec + +import ( + "io" + "sync" +) + +/* +PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line. +This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each +session by passing in a PrefixedWriter: + +gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter)) +*/ +type PrefixedWriter struct { + prefix []byte + writer io.Writer + lock *sync.Mutex + atStartOfLine bool +} + +func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter { + return &PrefixedWriter{ + prefix: []byte(prefix), + writer: writer, + lock: &sync.Mutex{}, + atStartOfLine: true, + } +} + +func (w *PrefixedWriter) Write(b []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + + toWrite := []byte{} + + for _, c := range b { + if w.atStartOfLine { + toWrite = append(toWrite, w.prefix...) 
+ } + + toWrite = append(toWrite, c) + + w.atStartOfLine = c == '\n' + } + + _, err := w.writer.Write(toWrite) + if err != nil { + return 0, err + } + + return len(b), nil +} diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go new file mode 100644 index 000000000..e847b1501 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go @@ -0,0 +1,43 @@ +package gexec_test + +import ( + "bytes" + + . "github.com/onsi/gomega/gexec" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("PrefixedWriter", func() { + var buffer *bytes.Buffer + var writer *PrefixedWriter + BeforeEach(func() { + buffer = &bytes.Buffer{} + writer = NewPrefixedWriter("[p]", buffer) + }) + + It("should emit the prefix on newlines", func() { + writer.Write([]byte("abc")) + writer.Write([]byte("def\n")) + writer.Write([]byte("hij\n")) + writer.Write([]byte("\n\n")) + writer.Write([]byte("klm\n\nnop")) + writer.Write([]byte("")) + writer.Write([]byte("qrs")) + writer.Write([]byte("\ntuv\nwx")) + writer.Write([]byte("yz\n\n")) + + Expect(buffer.String()).Should(Equal(`[p]abcdef +[p]hij +[p] +[p] +[p]klm +[p] +[p]nopqrs +[p]tuv +[p]wxyz +[p] +`)) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go new file mode 100644 index 000000000..5cb00ca65 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/session.go @@ -0,0 +1,303 @@ +/* +Package gexec provides support for testing external processes. +*/ +package gexec + +import ( + "io" + "os" + "os/exec" + "sync" + "syscall" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" +) + +const INVALID_EXIT_CODE = 254 + +type Session struct { + //The wrapped command + Command *exec.Cmd + + //A *gbytes.Buffer connected to the command's stdout + Out *gbytes.Buffer + + //A *gbytes.Buffer connected to the command's stderr + Err *gbytes.Buffer + + //A channel that will close when the command exits + Exited <-chan struct{} + + lock *sync.Mutex + exitCode int +} + +/* +Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session. + +The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err. +These buffers can be used with the gbytes.Say matcher to match against unread output: + + Expect(session.Out).Should(gbytes.Say("foo-out")) + Expect(session.Err).Should(gbytes.Say("foo-err")) + +In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with: + + Expect(session).Should(gbytes.Say("foo-out")) + +When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gbytes.Buffers and to the passed-in outWriter/errWriter. +This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter: + + session, err := Start(command, GinkgoWriter, GinkgoWriter) + +This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails. + +The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself.
+Instead, to assert that the command has exited you can use the gexec.Exit matcher: + + Expect(session).Should(gexec.Exit()) + +When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any +Eventuallys waiting for the buffers to Say something. +*/ +func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) { + exited := make(chan struct{}) + + session := &Session{ + Command: command, + Out: gbytes.NewBuffer(), + Err: gbytes.NewBuffer(), + Exited: exited, + lock: &sync.Mutex{}, + exitCode: -1, + } + + var commandOut, commandErr io.Writer + + commandOut, commandErr = session.Out, session.Err + + if outWriter != nil { + commandOut = io.MultiWriter(commandOut, outWriter) + } + + if errWriter != nil { + commandErr = io.MultiWriter(commandErr, errWriter) + } + + command.Stdout = commandOut + command.Stderr = commandErr + + err := command.Start() + if err == nil { + go session.monitorForExit(exited) + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + trackedSessions = append(trackedSessions, session) + } + + return session, err +} + +/* +Buffer implements the gbytes.BufferProvider interface and returns s.Out +This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out: + + Eventually(session).Should(gbytes.Say("foo")) +*/ +func (s *Session) Buffer() *gbytes.Buffer { + return s.Out +} + +/* +ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1. + +To assert that the command has exited it is more convenient to use the Exit matcher: + + Eventually(s).Should(gexec.Exit()) + +When the process exits because it has received a particular signal, the exit code will be 128+signal-value +(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html) + +*/ +func (s *Session) ExitCode() int { + s.lock.Lock() + defer s.lock.Unlock() + return s.exitCode +} + +/* +Wait waits until the wrapped command exits. It can be passed an optional timeout. +If the command does not exit within the timeout, Wait will trigger a test failure. + +Wait returns the session, making it possible to chain: + + session.Wait().Out.Contents() + +will wait for the command to exit then return the entirety of Out's contents. + +Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does. +*/ +func (s *Session) Wait(timeout ...interface{}) *Session { + EventuallyWithOffset(1, s, timeout...).Should(Exit()) + return s +} + +/* +Kill sends the running command a SIGKILL signal. It does not wait for the process to exit. + +If the command has already exited, Kill returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Kill() *Session { + return s.Signal(syscall.SIGKILL) +} + +/* +Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit. + +If the command has already exited, Interrupt returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Interrupt() *Session { + return s.Signal(syscall.SIGINT) +} + +/* +Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit. + +If the command has already exited, Terminate returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Terminate() *Session { + return s.Signal(syscall.SIGTERM) +} + +/* +Signal sends the running command the passed in signal. 
It does not wait for the process to exit. + +If the command has already exited, Signal returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Signal(signal os.Signal) *Session { + if s.processIsAlive() { + s.Command.Process.Signal(signal) + } + return s +} + +func (s *Session) monitorForExit(exited chan<- struct{}) { + err := s.Command.Wait() + s.lock.Lock() + s.Out.Close() + s.Err.Close() + status := s.Command.ProcessState.Sys().(syscall.WaitStatus) + if status.Signaled() { + s.exitCode = 128 + int(status.Signal()) + } else { + exitStatus := status.ExitStatus() + if exitStatus == -1 && err != nil { + // the wait failed without a usable exit status; report INVALID_EXIT_CODE rather than leaving the session marked as still running + s.exitCode = INVALID_EXIT_CODE + } else { + s.exitCode = exitStatus + } + } + s.lock.Unlock() + + close(exited) +} + +func (s *Session) processIsAlive() bool { + return s.ExitCode() == -1 && s.Command.Process != nil +} + +var trackedSessions = []*Session{} +var trackedSessionsMutex = &sync.Mutex{} + +/* +KillAndWait sends a SIGKILL signal to all the processes started by Start, and waits for them to exit. +The timeout specified is applied to each process killed. + +If any of the processes already exited, KillAndWait returns silently. +*/ +func KillAndWait(timeout ...interface{}) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Kill().Wait(timeout...) + } + trackedSessions = []*Session{} +} + +/* +TerminateAndWait sends a SIGTERM signal to all the processes started by Start, and waits for them to exit. +The timeout specified is applied to each process killed. + +If any of the processes already exited, TerminateAndWait returns silently. +*/ +func TerminateAndWait(timeout ...interface{}) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Terminate().Wait(timeout...) + } +} + +/* +Kill sends a SIGKILL signal to all the processes started by Start. +It does not wait for the processes to exit. + +If any of the processes already exited, Kill returns silently. +*/ +func Kill() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Kill() + } +} + +/* +Terminate sends a SIGTERM signal to all the processes started by Start. +It does not wait for the processes to exit. + +If any of the processes already exited, Terminate returns silently. +*/ +func Terminate() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Terminate() + } +} + +/* +Signal sends the passed in signal to all the processes started by Start. +It does not wait for the processes to exit. + +If any of the processes already exited, Signal returns silently. +*/ +func Signal(signal os.Signal) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Signal(signal) + } +} + +/* +Interrupt sends the SIGINT signal to all the processes started by Start. +It does not wait for the processes to exit. + +If any of the processes already exited, Interrupt returns silently.
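+
+For example, a minimal sketch of suite cleanup (the Ginkgo AfterEach placement is illustrative):
+
+	AfterEach(func() {
+		Interrupt()
+	})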
+*/ +func Interrupt() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Interrupt() + } +} diff --git a/vendor/github.com/onsi/gomega/gexec/session_test.go b/vendor/github.com/onsi/gomega/gexec/session_test.go new file mode 100644 index 000000000..6fdf22d21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/session_test.go @@ -0,0 +1,336 @@ +package gexec_test + +import ( + "io" + "io/ioutil" + "os/exec" + "syscall" + "time" + + . "github.com/onsi/gomega/gbytes" + . "github.com/onsi/gomega/gexec" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Session", func() { + var command *exec.Cmd + var session *Session + + var outWriter, errWriter io.Writer + + BeforeEach(func() { + outWriter = nil + errWriter = nil + }) + + JustBeforeEach(func() { + command = exec.Command(fireflyPath) + var err error + session, err = Start(command, outWriter, errWriter) + Expect(err).ShouldNot(HaveOccurred()) + }) + + Context("running a command", func() { + It("should start the process", func() { + Expect(command.Process).ShouldNot(BeNil()) + }) + + It("should wrap the process's stdout and stderr with gbytes buffers", func(done Done) { + Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) + Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!")) + defer session.Out.CancelDetects() + + select { + case <-session.Out.Detect("Can we maybe vote on the whole murdering people issue"): + Eventually(session).Should(Exit(0)) + case <-session.Out.Detect("I swear by my pretty floral bonnet, I will end you."): + Eventually(session).Should(Exit(1)) + case <-session.Out.Detect("My work's illegal, but at least it's honest."): + Eventually(session).Should(Exit(2)) + } + + close(done) + }) + + It("should satisfy the gbytes.BufferProvider interface, passing Stdout", func() { + Eventually(session).Should(Say("We've done the impossible, and that makes us mighty")) + Eventually(session).Should(Exit()) + }) + }) + + Describe("providing the exit code", func() { + It("should provide the app's exit code", func() { + Expect(session.ExitCode()).Should(Equal(-1)) + + Eventually(session).Should(Exit()) + Expect(session.ExitCode()).Should(BeNumerically(">=", 0)) + Expect(session.ExitCode()).Should(BeNumerically("<", 3)) + }) + }) + + Describe("wait", func() { + It("should wait till the command exits", func() { + Expect(session.ExitCode()).Should(Equal(-1)) + Expect(session.Wait().ExitCode()).Should(BeNumerically(">=", 0)) + Expect(session.Wait().ExitCode()).Should(BeNumerically("<", 3)) + }) + }) + + Describe("exited", func() { + It("should close when the command exits", func() { + Eventually(session.Exited).Should(BeClosed()) + Expect(session.ExitCode()).ShouldNot(Equal(-1)) + }) + }) + + Describe("kill", func() { + It("should kill the command", func() { + session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session.Kill() + Eventually(session).Should(Exit(128 + 9)) + }) + }) + + Describe("interrupt", func() { + It("should interrupt the command", func() { + session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session.Interrupt() + Eventually(session).Should(Exit(128 + 2)) + }) + }) + + Describe("terminate", func() { + It("should terminate the command", func() { + session, err := Start(exec.Command("sleep", 
"10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session.Terminate() + Eventually(session).Should(Exit(128 + 15)) + }) + }) + + Describe("signal", func() { + It("should send the signal to the command", func() { + session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session.Signal(syscall.SIGABRT) + Eventually(session).Should(Exit(128 + 6)) + }) + + It("should ignore sending a signal if the command did not start", func() { + session, err := Start(exec.Command("notexisting"), GinkgoWriter, GinkgoWriter) + Expect(err).To(HaveOccurred()) + + Expect(func() { session.Signal(syscall.SIGUSR1) }).NotTo(Panic()) + }) + }) + + Context("tracking sessions", func() { + BeforeEach(func() { + KillAndWait() + }) + + Describe("kill", func() { + It("should kill all the started sessions", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + Kill() + + Eventually(session1).Should(Exit(128 + 9)) + Eventually(session2).Should(Exit(128 + 9)) + Eventually(session3).Should(Exit(128 + 9)) + }) + + It("should not track unstarted sessions", func() { + _, err := Start(exec.Command("does not exist", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).Should(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + Kill() + + Eventually(session2).Should(Exit(128 + 9)) + Eventually(session3).Should(Exit(128 + 9)) + }) + + }) + + Describe("killAndWait", func() { + It("should kill all the started sessions and wait for them to finish", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + KillAndWait() + Expect(session1).Should(Exit(128+9), "Should have exited") + Expect(session2).Should(Exit(128+9), "Should have exited") + Expect(session3).Should(Exit(128+9), "Should have exited") + }) + }) + + Describe("terminate", func() { + It("should terminate all the started sessions", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + Terminate() + + Eventually(session1).Should(Exit(128 + 15)) + Eventually(session2).Should(Exit(128 + 15)) + Eventually(session3).Should(Exit(128 + 15)) + }) + }) + + Describe("terminateAndWait", func() { + It("should terminate all the started sessions, and wait for them to exit", func() { + session1, err := Start(exec.Command("sleep", "10000000"), 
GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + TerminateAndWait() + + Expect(session1).Should(Exit(128+15), "Should have exited") + Expect(session2).Should(Exit(128+15), "Should have exited") + Expect(session3).Should(Exit(128+15), "Should have exited") + }) + }) + + Describe("signal", func() { + It("should signal all the started sessions", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + Signal(syscall.SIGABRT) + + Eventually(session1).Should(Exit(128 + 6)) + Eventually(session2).Should(Exit(128 + 6)) + Eventually(session3).Should(Exit(128 + 6)) + }) + }) + + Describe("interrupt", func() { + It("should interrupt all the started sessions, and not wait", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Expect(err).ShouldNot(HaveOccurred()) + + Interrupt() + + Eventually(session1).Should(Exit(128 + 2)) + Eventually(session2).Should(Exit(128 + 2)) + Eventually(session3).Should(Exit(128 + 2)) + }) + }) + }) + + Context("when the command exits", func() { + It("should close the buffers", func() { + Eventually(session).Should(Exit()) + + Expect(session.Out.Closed()).Should(BeTrue()) + Expect(session.Err.Closed()).Should(BeTrue()) + + Expect(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) + }) + + var So = It + + So("this means that eventually should short circuit", func() { + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(session).Should(Say("blah blah blah blah blah")) + }) + Expect(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond)) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Context("when wrapping out and err", func() { + var ( + outWriterBuffer, errWriterBuffer *Buffer + ) + + BeforeEach(func() { + outWriterBuffer = NewBuffer() + outWriter = outWriterBuffer + errWriterBuffer = NewBuffer() + errWriter = errWriterBuffer + }) + + It("should route to both the provided writers and the gbytes buffers", func() { + Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) + Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!")) + + Expect(outWriterBuffer.Contents()).Should(ContainSubstring("We've done the impossible, and that makes us mighty")) + Expect(errWriterBuffer.Contents()).Should(ContainSubstring("Ah, curse your sudden but inevitable betrayal!")) + + Eventually(session).Should(Exit()) + + Expect(outWriterBuffer.Contents()).Should(Equal(session.Out.Contents())) + Expect(errWriterBuffer.Contents()).Should(Equal(session.Err.Contents())) + }) + + Context("when discarding the output of the command", func() { + 
BeforeEach(func() { + outWriter = ioutil.Discard + errWriter = ioutil.Discard + }) + + It("executes successfully", func() { + Eventually(session).Should(Exit()) + }) + }) + }) + + Describe("when the command fails to start", func() { + It("should return an error", func() { + _, err := Start(exec.Command("agklsjdfas"), nil, nil) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/ghttp/handlers.go b/vendor/github.com/onsi/gomega/ghttp/handlers.go new file mode 100644 index 000000000..82bc1412d --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/handlers.go @@ -0,0 +1,313 @@ +package ghttp + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + + "github.com/golang/protobuf/proto" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +//CombineHandlers takes a variadic list of handlers and produces one handler +//that calls each handler in order. +func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + for _, handler := range handlers { + handler(w, req) + } + } +} + +//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path +//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery` +// +//For path, you may pass in a string, in which case strict equality will be applied +//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example) +func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + Expect(req.Method).Should(Equal(method), "Method mismatch") + switch p := path.(type) { + case types.GomegaMatcher: + Expect(req.URL.Path).Should(p, "Path mismatch") + default: + Expect(req.URL.Path).Should(Equal(path), "Path mismatch") + } + if len(rawQuery) > 0 { + values, err := url.ParseQuery(rawQuery[0]) + Expect(err).ShouldNot(HaveOccurred(), "Expected RawQuery is malformed") + + Expect(req.URL.Query()).Should(Equal(values), "RawQuery mismatch") + } + } +} + +//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the +//specified value +func VerifyContentType(contentType string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + Expect(req.Header.Get("Content-Type")).Should(Equal(contentType)) + } +} + +//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header +//matching the passed in username and password +func VerifyBasicAuth(username string, password string) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + auth := req.Header.Get("Authorization") + Expect(auth).ShouldNot(Equal(""), "Authorization header must be specified") + + decoded, err := base64.StdEncoding.DecodeString(auth[6:]) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch") + } +} + +//VerifyHeader returns a handler that verifies the request contains the passed in headers. +//The passed in header keys are first canonicalized via http.CanonicalHeaderKey. +// +//The request must contain *all* the passed in headers, but it is allowed to have additional headers +//beyond the passed in set.
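+//
+//For example, a minimal sketch (the server s and the header value are illustrative):
+//
+//	s.AppendHandlers(VerifyHeader(http.Header{
+//		"X-Api-Key": []string{"my-key"},
+//	}))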
+func VerifyHeader(header http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + for key, values := range header { + key = http.CanonicalHeaderKey(key) + Expect(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key) + } + } +} + +//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values +//(recall that a `http.Header` is a mapping from string (key) to []string (values)) +//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object. +func VerifyHeaderKV(key string, values ...string) http.HandlerFunc { + return VerifyHeader(http.Header{key: values}) +} + +//VerifyBody returns a handler that verifies that the body of the request matches the passed in byte array. +//It does this using Equal(). +func VerifyBody(expectedBody []byte) http.HandlerFunc { + return CombineHandlers( + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + req.Body.Close() + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(Equal(expectedBody), "Body Mismatch") + }, + ) +} + +//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation +//matching the passed in JSON string. It does this using Gomega's MatchJSON method +// +//VerifyJSON also verifies that the request's content type is application/json +func VerifyJSON(expectedJSON string) http.HandlerFunc { + return CombineHandlers( + VerifyContentType("application/json"), + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + req.Body.Close() + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(MatchJSON(expectedJSON), "JSON Mismatch") + }, + ) +} + +//VerifyJSONRepresenting is similar to VerifyJSON. Instead of taking a JSON string, however, it +//takes an arbitrary JSON-encodable object and verifies that the request's body is a JSON representation +//that matches the object +func VerifyJSONRepresenting(object interface{}) http.HandlerFunc { + data, err := json.Marshal(object) + Expect(err).ShouldNot(HaveOccurred()) + return CombineHandlers( + VerifyContentType("application/json"), + VerifyJSON(string(data)), + ) +} + +//VerifyForm returns a handler that verifies a request contains the specified form values. +// +//The request must contain *all* of the specified values, but it is allowed to have additional +//form values beyond the passed in set. +func VerifyForm(values url.Values) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + Expect(err).ShouldNot(HaveOccurred()) + for key, vals := range values { + Expect(r.Form[key]).Should(Equal(vals), "Form mismatch for key: %s", key) + } + } +} + +//VerifyFormKV returns a handler that verifies a request contains a form key with the specified values. +// +//It is a convenience wrapper around `VerifyForm` that lets you avoid having to create a `url.Values` object. +func VerifyFormKV(key string, values ...string) http.HandlerFunc { + return VerifyForm(url.Values{key: values}) +} + +//VerifyProtoRepresenting returns a handler that verifies that the body of the request is a valid protobuf +//representation of the passed message.
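+//
+//For example, a minimal sketch (a generated SimpleMessage proto type is assumed):
+//
+//	message := &protobuf.SimpleMessage{Description: proto.String("a description"), Id: proto.Int32(0)}
+//	s.AppendHandlers(VerifyProtoRepresenting(message))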
+ +// +//VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf +func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc { + return CombineHandlers( + VerifyContentType("application/x-protobuf"), + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + Expect(err).ShouldNot(HaveOccurred()) + req.Body.Close() + + expectedType := reflect.TypeOf(expected) + actualValuePtr := reflect.New(expectedType.Elem()) + + actual, ok := actualValuePtr.Interface().(proto.Message) + Expect(ok).Should(BeTrue(), "Message value is not a proto.Message") + + err = proto.Unmarshal(body, actual) + Expect(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf") + + Expect(actual).Should(Equal(expected), "ProtoBuf Mismatch") + }, + ) +} + +func copyHeader(src http.Header, dst http.Header) { + for key, value := range src { + dst[key] = value + } +} + +/* +RespondWith returns a handler that responds to a request with the specified status code and body + +Body may be a string or []byte + +Also, RespondWith can be given an optional http.Header. The headers defined therein will be added to the response headers. +*/ +func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + if len(optionalHeader) == 1 { + copyHeader(optionalHeader[0], w.Header()) + } + w.WriteHeader(statusCode) + switch x := body.(type) { + case string: + w.Write([]byte(x)) + case []byte: + w.Write(x) + default: + Expect(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.") + } + } +} + +/* +RespondWithPtr returns a handler that responds to a request with the specified status code and body + +Unlike RespondWith, you pass RespondWithPtr a pointer to the status code and body allowing different tests +to share the same setup but specify different status codes and bodies. + +Also, RespondWithPtr can be given an optional http.Header. The headers defined therein will be added to the response headers. +Since the http.Header can be mutated after the fact you don't need to pass in a pointer. +*/ +func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + if len(optionalHeader) == 1 { + copyHeader(optionalHeader[0], w.Header()) + } + w.WriteHeader(*statusCode) + if body != nil { + switch x := (body).(type) { + case *string: + w.Write([]byte(*x)) + case *[]byte: + w.Write(*x) + default: + Expect(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.") + } + } + } +} + +/* +RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body +containing the JSON-encoding of the passed in object + +Also, RespondWithJSONEncoded can be given an optional http.Header. The headers defined therein will be added to the response headers.
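+
+For example, a minimal sketch (the server s, path, and payload are illustrative):
+
+	s.AppendHandlers(CombineHandlers(
+		VerifyRequest("GET", "/sprockets"),
+		RespondWithJSONEncoded(http.StatusOK, []string{"alfalfa", "banana"}),
+	))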
+*/ +func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc { + data, err := json.Marshal(object) + Expect(err).ShouldNot(HaveOccurred()) + + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/json"} + } + return RespondWith(statusCode, string(data), headers) +} + +/* +RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer +to a status code and object. + +This allows different tests to share the same setup but specify different status codes and JSON-encoded +objects. + +Also, RespondWithJSONEncodedPtr can be given an optional http.Header. The headers defined therein will be added to the response headers. +Since the http.Header can be mutated after the fact you don't need to pass in a pointer. +*/ +func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + data, err := json.Marshal(object) + Expect(err).ShouldNot(HaveOccurred()) + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/json"} + } + copyHeader(headers, w.Header()) + w.WriteHeader(*statusCode) + w.Write(data) + } +} + +//RespondWithProto returns a handler that responds to a request with the specified status code and a body +//containing the protobuf serialization of the provided message. +// +//Also, RespondWithProto can be given an optional http.Header. The headers defined therein will be added to the response headers. +func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + data, err := proto.Marshal(message) + Expect(err).ShouldNot(HaveOccurred()) + + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/x-protobuf"} + } + copyHeader(headers, w.Header()) + + w.WriteHeader(statusCode) + w.Write(data) + } +} diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go new file mode 100644 index 000000000..b2972bc9f --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go @@ -0,0 +1,3 @@ +package protobuf + +//go:generate protoc --go_out=. simple_message.proto diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go new file mode 100644 index 000000000..c55a48448 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go @@ -0,0 +1,55 @@ +// Code generated by protoc-gen-go. +// source: simple_message.proto +// DO NOT EDIT! + +/* +Package protobuf is a generated protocol buffer package. + +It is generated from these files: + simple_message.proto + +It has these top-level messages: + SimpleMessage +*/ +package protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type SimpleMessage struct { + Description *string `protobuf:"bytes,1,req,name=description" json:"description,omitempty"` + Id *int32 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` + Metadata *string `protobuf:"bytes,3,opt,name=metadata" json:"metadata,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleMessage) Reset() { *m = SimpleMessage{} } +func (m *SimpleMessage) String() string { return proto.CompactTextString(m) } +func (*SimpleMessage) ProtoMessage() {} + +func (m *SimpleMessage) GetDescription() string { + if m != nil && m.Description != nil { + return *m.Description + } + return "" +} + +func (m *SimpleMessage) GetId() int32 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *SimpleMessage) GetMetadata() string { + if m != nil && m.Metadata != nil { + return *m.Metadata + } + return "" +} diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto new file mode 100644 index 000000000..35b7145c2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto @@ -0,0 +1,9 @@ +syntax = "proto2"; + +package protobuf; + +message SimpleMessage { + required string description = 1; + required int32 id = 2; + optional string metadata = 3; +} diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server.go b/vendor/github.com/onsi/gomega/ghttp/test_server.go new file mode 100644 index 000000000..ba473fefa --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/test_server.go @@ -0,0 +1,418 @@ +/* +Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports +registering multiple handlers. Incoming requests are not routed between the different handlers +- rather it is merely the order of the handlers that matters. The first request is handled by the first +registered handler, the second request by the second handler, etc. + +The intent here is to have each handler *verify* that the incoming request is valid. To accomplish this, ghttp +also provides a collection of bite-size handlers that each perform one aspect of request verification. These can +be composed together and registered with a ghttp server. The result is an expressive language for describing +the requests generated by the client under test. + +Here's a simple example; note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches.
+A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients + + var _ = Describe("A Sprockets Client", func() { + var server *ghttp.Server + var client *SprocketClient + BeforeEach(func() { + server = ghttp.NewServer() + client = NewSprocketClient(server.URL(), "skywalker", "tk427") + }) + + AfterEach(func() { + server.Close() + }) + + Describe("fetching sprockets", func() { + var statusCode int + var sprockets []Sprocket + BeforeEach(func() { + statusCode = http.StatusOK + sprockets = []Sprocket{} + server.AppendHandlers(ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/sprockets"), + ghttp.VerifyBasicAuth("skywalker", "tk427"), + ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets), + )) + }) + + Context("when requesting all sprockets", func() { + Context("when the response is successful", func() { + BeforeEach(func() { + sprockets = []Sprocket{ + NewSprocket("Alfalfa"), + NewSprocket("Banana"), + } + }) + + It("should return the returned sprockets", func() { + Expect(client.Sprockets()).Should(Equal(sprockets)) + }) + }) + + Context("when the response is missing", func() { + BeforeEach(func() { + statusCode = http.StatusNotFound + }) + + It("should return an empty list of sprockets", func() { + Expect(client.Sprockets()).Should(BeEmpty()) + }) + }) + + Context("when the response fails to authenticate", func() { + BeforeEach(func() { + statusCode = http.StatusUnauthorized + }) + + It("should return an AuthenticationError error", func() { + sprockets, err := client.Sprockets() + Expect(sprockets).Should(BeEmpty()) + Expect(err).Should(MatchError(AuthenticationError)) + }) + }) + + Context("when the response is a server failure", func() { + BeforeEach(func() { + statusCode = http.StatusInternalServerError + }) + + It("should return an InternalError error", func() { + sprockets, err := client.Sprockets() + Expect(sprockets).Should(BeEmpty()) + Expect(err).Should(MatchError(InternalError)) + }) + }) + }) + + Context("when requesting some sprockets", func() { + BeforeEach(func() { + sprockets = []Sprocket{ + NewSprocket("Alfalfa"), + NewSprocket("Banana"), + } + + server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD")) + }) + + It("should make the request with a filter", func() { + Expect(client.Sprockets("food")).Should(Equal(sprockets)) + }) + }) + }) + }) +*/ +package ghttp + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "reflect" + "regexp" + "strings" + "sync" + + . "github.com/onsi/gomega" +) + +func new() *Server { + return &Server{ + AllowUnhandledRequests: false, + UnhandledRequestStatusCode: http.StatusInternalServerError, + writeLock: &sync.Mutex{}, + } +} + +type routedHandler struct { + method string + pathRegexp *regexp.Regexp + path string + handler http.HandlerFunc +} + +// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server. The server is started automatically. +func NewServer() *Server { + s := new() + s.HTTPTestServer = httptest.NewServer(s) + return s +} + +// NewUnstartedServer returns a new, unstarted, `*ghttp.Server`. Useful for specifying a custom listener on `server.HTTPTestServer`. +func NewUnstartedServer() *Server { + s := new() + s.HTTPTestServer = httptest.NewUnstartedServer(s) + return s +} + +// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server. The server is started automatically.
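+//
+// For example, a minimal sketch (httptest serves a self-signed certificate, so the client under test must be configured to trust it or to skip verification):
+//
+//	s := NewTLSServer()
+//	defer s.Close()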
+func NewTLSServer() *Server { + s := new() + s.HTTPTestServer = httptest.NewTLSServer(s) + return s +} + +type Server struct { + //The underlying httptest server + HTTPTestServer *httptest.Server + + //Defaults to false. If set to true, the Server will allow more requests than there are registered handlers. + //Direct use of this property is deprecated and is likely to be removed, use GetAllowUnhandledRequests and SetAllowUnhandledRequests instead. + AllowUnhandledRequests bool + + //The status code returned when receiving an unhandled request. + //Defaults to http.StatusInternalServerError. + //Only applies if AllowUnhandledRequests is true + //Direct use of this property is deprecated and is likely to be removed, use GetUnhandledRequestStatusCode and SetUnhandledRequestStatusCode instead. + UnhandledRequestStatusCode int + + //If provided, ghttp will log about each request received to the provided io.Writer + //Defaults to nil + //If you're using Ginkgo, set this to GinkgoWriter to get improved output during failures + Writer io.Writer + + receivedRequests []*http.Request + requestHandlers []http.HandlerFunc + routedHandlers []routedHandler + + writeLock *sync.Mutex + calls int +} + +//Start() starts an unstarted ghttp server. It is a catastrophic error to call Start more than once (thanks, httptest). +func (s *Server) Start() { + s.HTTPTestServer.Start() +} + +//URL() returns a url that will hit the server +func (s *Server) URL() string { + return s.HTTPTestServer.URL +} + +//Addr() returns the address on which the server is listening. +func (s *Server) Addr() string { + return s.HTTPTestServer.Listener.Addr().String() +} + +//Close() should be called at the end of each test. It spins down and cleans up the test server. +func (s *Server) Close() { + s.writeLock.Lock() + server := s.HTTPTestServer + s.HTTPTestServer = nil + s.writeLock.Unlock() + + if server != nil { + server.Close() + } +} + +//ServeHTTP() makes Server an http.Handler +//When the server receives a request it handles the request in the following order: +// +//1. If the request matches a handler registered with RouteToHandler, that handler is called. +//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order. +//3. If all registered handlers have been called then: +// a) If AllowUnhandledRequests is set to true, the request will be handled with response code of UnhandledRequestStatusCode +// b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.writeLock.Lock() + defer func() { + e := recover() + if e != nil { + w.WriteHeader(http.StatusInternalServerError) + } + + //If the handler panics GHTTP will silently succeed. This is bad™. + //To catch this case we need to fail the test if the handler has panicked. + //However, if the handler is panicking because Ginkgo's causing it to panic (i.e. an assertion failed) + //then we shouldn't double-report the error as this will confuse people. + + //So: step 1, if this is a Ginkgo panic - do nothing, Ginkgo's aware of the failure + eAsString, ok := e.(string) + if ok && strings.Contains(eAsString, "defer GinkgoRecover()") { + return + } + + //If we're here, we have to do step 2: assert that the error is nil. This assertion will + //allow us to fail the test suite (note: we can't call Fail since Gomega is not allowed to import Ginkgo). 
+ //Since a failed assertion throws a panic, and we are likely in a goroutine, we need to defer within our defer! + defer func() { + recover() + }() + Expect(e).Should(BeNil(), "Handler Panicked") + }() + + if s.Writer != nil { + s.Writer.Write([]byte(fmt.Sprintf("GHTTP Received Request: %s - %s\n", req.Method, req.URL))) + } + + s.receivedRequests = append(s.receivedRequests, req) + if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok { + s.writeLock.Unlock() + routedHandler(w, req) + } else if s.calls < len(s.requestHandlers) { + h := s.requestHandlers[s.calls] + s.calls++ + s.writeLock.Unlock() + h(w, req) + } else { + s.writeLock.Unlock() + if s.GetAllowUnhandledRequests() { + ioutil.ReadAll(req.Body) + req.Body.Close() + w.WriteHeader(s.GetUnhandledRequestStatusCode()) + } else { + formatted, err := httputil.DumpRequest(req, true) + Expect(err).NotTo(HaveOccurred(), "Encountered error while dumping HTTP request") + Expect(string(formatted)).Should(BeNil(), "Received Unhandled Request") + } + } +} + +//ReceivedRequests returns all requests received by the server (both handled and unhandled requests) +func (s *Server) ReceivedRequests() []*http.Request { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + return s.receivedRequests +} + +//RouteToHandler can be used to register handlers that will always handle requests that match +//the passed in method and path. +// +//The path may be either a string object or a *regexp.Regexp. +func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + rh := routedHandler{ + method: method, + handler: handler, + } + + switch p := path.(type) { + case *regexp.Regexp: + rh.pathRegexp = p + case string: + rh.path = p + default: + panic("path must be a string or a regular expression") + } + + for i, existingRH := range s.routedHandlers { + if existingRH.method == method && + reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) && + existingRH.path == rh.path { + s.routedHandlers[i] = rh + return + } + } + s.routedHandlers = append(s.routedHandlers, rh) +} + +func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) { + for _, rh := range s.routedHandlers { + if rh.method == method { + if rh.pathRegexp != nil { + if rh.pathRegexp.Match([]byte(path)) { + return rh.handler, true + } + } else if rh.path == path { + return rh.handler, true + } + } + } + + return nil, false +} + +//AppendHandlers appends http.HandlerFuncs to the server's list of registered handlers. The first incoming request is handled by the first handler, the second by the second, etc... +func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + s.requestHandlers = append(s.requestHandlers, handlers...) +} + +//SetHandler overrides the registered handler at the passed in index with the passed in handler +//This is useful, for example, when a server has been set up in a shared context, but must be tweaked +//for a particular test. +func (s *Server) SetHandler(index int, handler http.HandlerFunc) { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + s.requestHandlers[index] = handler +} + +//GetHandler returns the handler registered at the passed in index.
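+//
+//For example, a minimal sketch (a handler registered at index 0 is assumed):
+//
+//	handler := s.GetHandler(0)
+//	s.SetHandler(0, CombineHandlers(handler, VerifyContentType("application/json")))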
+func (s *Server) GetHandler(index int) http.HandlerFunc { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + return s.requestHandlers[index] +} + +func (s *Server) Reset() { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + s.HTTPTestServer.CloseClientConnections() + s.calls = 0 + s.receivedRequests = nil + s.requestHandlers = nil + s.routedHandlers = nil +} + +//WrapHandler combines the passed in handler with the handler registered at the passed in index. +//This is useful, for example, when a server has been set up in a shared context but must be tweaked +//for a particular test. +// +//If the currently registered handler is A, and the new passed in handler is B then +//WrapHandler will generate a new handler that first calls A, then calls B, and assigns it to the passed in index +func (s *Server) WrapHandler(index int, handler http.HandlerFunc) { + existingHandler := s.GetHandler(index) + s.SetHandler(index, CombineHandlers(existingHandler, handler)) +} + +func (s *Server) CloseClientConnections() { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + s.HTTPTestServer.CloseClientConnections() +} + +//SetAllowUnhandledRequests enables the server to accept unhandled requests. +func (s *Server) SetAllowUnhandledRequests(allowUnhandledRequests bool) { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + s.AllowUnhandledRequests = allowUnhandledRequests +} + +//GetAllowUnhandledRequests returns true if the server accepts unhandled requests. +func (s *Server) GetAllowUnhandledRequests() bool { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + return s.AllowUnhandledRequests +} + +//SetUnhandledRequestStatusCode sets the status code to be returned when the server receives unhandled requests +func (s *Server) SetUnhandledRequestStatusCode(statusCode int) { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + s.UnhandledRequestStatusCode = statusCode +} + +//GetUnhandledRequestStatusCode returns the current status code being returned for unhandled requests +func (s *Server) GetUnhandledRequestStatusCode() int { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + return s.UnhandledRequestStatusCode +} diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go b/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go new file mode 100644 index 000000000..7c1236082 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go @@ -0,0 +1,13 @@ +package ghttp_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestGHTTP(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "GHTTP Suite") +} diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server_test.go b/vendor/github.com/onsi/gomega/ghttp/test_server_test.go new file mode 100644 index 000000000..ac6a269ab --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/test_server_test.go @@ -0,0 +1,1093 @@ +package ghttp_test + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + + "github.com/golang/protobuf/proto" + "github.com/onsi/gomega/gbytes" + "github.com/onsi/gomega/ghttp/protobuf" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + .
"github.com/onsi/gomega/ghttp" +) + +var _ = Describe("TestServer", func() { + var ( + resp *http.Response + err error + s *Server + ) + + BeforeEach(func() { + s = NewServer() + }) + + AfterEach(func() { + s.Close() + }) + + Describe("Resetting the server", func() { + BeforeEach(func() { + s.RouteToHandler("GET", "/", func(w http.ResponseWriter, req *http.Request) {}) + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {}) + http.Get(s.URL() + "/") + + Expect(s.ReceivedRequests()).Should(HaveLen(1)) + }) + + It("clears all handlers and call counts", func() { + s.Reset() + Expect(s.ReceivedRequests()).Should(HaveLen(0)) + Expect(func() { s.GetHandler(0) }).Should(Panic()) + }) + }) + + Describe("closing client connections", func() { + It("closes", func() { + s.RouteToHandler("GET", "/", + func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, req.RemoteAddr) + }, + ) + client := http.Client{Transport: &http.Transport{DisableKeepAlives: true}} + resp, err := client.Get(s.URL()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(resp.StatusCode).Should(Equal(200)) + + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + Expect(err).ShouldNot(HaveOccurred()) + + s.CloseClientConnections() + + resp, err = client.Get(s.URL()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(resp.StatusCode).Should(Equal(200)) + + body2, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + Expect(err).ShouldNot(HaveOccurred()) + + Expect(body2).ShouldNot(Equal(body)) + }) + }) + + Describe("closing server mulitple times", func() { + It("should not fail", func() { + s.Close() + Expect(s.Close).ShouldNot(Panic()) + }) + }) + + Describe("allowing unhandled requests", func() { + It("is not permitted by default", func() { + Expect(s.GetAllowUnhandledRequests()).To(BeFalse()) + }) + + Context("when true", func() { + BeforeEach(func() { + s.SetAllowUnhandledRequests(true) + s.SetUnhandledRequestStatusCode(http.StatusForbidden) + resp, err = http.Get(s.URL() + "/foo") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should allow unhandled requests and respond with the passed in status code", func() { + Expect(err).ShouldNot(HaveOccurred()) + Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) + + data, err := ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(data).Should(BeEmpty()) + }) + + It("should record the requests", func() { + Expect(s.ReceivedRequests()).Should(HaveLen(1)) + Expect(s.ReceivedRequests()[0].URL.Path).Should(Equal("/foo")) + }) + }) + + Context("when false", func() { + It("should fail when attempting a request", func() { + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo") + }) + + Expect(failures[0]).Should(ContainSubstring("Received Unhandled Request")) + }) + }) + }) + + Describe("Managing Handlers", func() { + var called []string + BeforeEach(func() { + called = []string{} + s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r1") + }) + s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r2") + }) + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { + called = append(called, "A") + }, func(w http.ResponseWriter, req *http.Request) { + called = append(called, "B") + }) + }) + + It("should prefer routed handlers if there is a match", func() { + http.Get(s.URL() + "/routed") + http.Post(s.URL()+"/routed7", "application/json", nil) + http.Get(s.URL() 
+ "/foo") + http.Get(s.URL() + "/routed") + http.Post(s.URL()+"/routed9", "application/json", nil) + http.Get(s.URL() + "/bar") + + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo") + http.Get(s.URL() + "/routed/not/a/match") + http.Get(s.URL() + "/routed7") + http.Post(s.URL()+"/routed", "application/json", nil) + }) + + Expect(failures[0]).Should(ContainSubstring("Received Unhandled Request")) + Expect(failures).Should(HaveLen(4)) + + http.Post(s.URL()+"/routed3", "application/json", nil) + + Expect(called).Should(Equal([]string{"r1", "r2", "A", "r1", "r2", "B", "r2"})) + }) + + It("should override routed handlers when reregistered", func() { + s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r3") + }) + s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r4") + }) + + http.Get(s.URL() + "/routed") + http.Post(s.URL()+"/routed7", "application/json", nil) + + Expect(called).Should(Equal([]string{"r3", "r4"})) + }) + + It("should call the appended handlers, in order, as requests come in", func() { + http.Get(s.URL() + "/foo") + Expect(called).Should(Equal([]string{"A"})) + + http.Get(s.URL() + "/foo") + Expect(called).Should(Equal([]string{"A", "B"})) + + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo") + }) + + Expect(failures[0]).Should(ContainSubstring("Received Unhandled Request")) + }) + + Describe("Overwriting an existing handler", func() { + BeforeEach(func() { + s.SetHandler(0, func(w http.ResponseWriter, req *http.Request) { + called = append(called, "C") + }) + }) + + It("should override the specified handler", func() { + http.Get(s.URL() + "/foo") + http.Get(s.URL() + "/foo") + Expect(called).Should(Equal([]string{"C", "B"})) + }) + }) + + Describe("Getting an existing handler", func() { + It("should return the handler func", func() { + s.GetHandler(1)(nil, nil) + Expect(called).Should(Equal([]string{"B"})) + }) + }) + + Describe("Wrapping an existing handler", func() { + BeforeEach(func() { + s.WrapHandler(0, func(w http.ResponseWriter, req *http.Request) { + called = append(called, "C") + }) + }) + + It("should wrap the existing handler in a new handler", func() { + http.Get(s.URL() + "/foo") + http.Get(s.URL() + "/foo") + Expect(called).Should(Equal([]string{"A", "C", "B"})) + }) + }) + }) + + Describe("When a handler fails", func() { + BeforeEach(func() { + s.SetUnhandledRequestStatusCode(http.StatusForbidden) //just to be clear that 500s aren't coming from unhandled requests + }) + + Context("because the handler has panicked", func() { + BeforeEach(func() { + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { + panic("bam") + }) + }) + + It("should respond with a 500 and make a failing assertion", func() { + var resp *http.Response + var err error + + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL()) + }) + + Expect(err).ShouldNot(HaveOccurred()) + Expect(resp.StatusCode).Should(Equal(http.StatusInternalServerError)) + Expect(failures).Should(ConsistOf(ContainSubstring("Handler Panicked"))) + }) + }) + + Context("because an assertion has failed", func() { + BeforeEach(func() { + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { + // Expect(true).Should(BeFalse()) <-- would be nice to do it this way, but the test just can't be written this way + + By("We're cheating a bit here -- we're throwing a GINKGO_PANIC which simulates a 
failed assertion") + panic(GINKGO_PANIC) + }) + }) + + It("should respond with a 500 and *not* make a failing assertion, instead relying on Ginkgo to have already been notified of the error", func() { + resp, err := http.Get(s.URL()) + + Expect(err).ShouldNot(HaveOccurred()) + Expect(resp.StatusCode).Should(Equal(http.StatusInternalServerError)) + }) + }) + }) + + Describe("Logging to the Writer", func() { + var buf *gbytes.Buffer + BeforeEach(func() { + buf = gbytes.NewBuffer() + s.Writer = buf + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {}) + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {}) + }) + + It("should write to the buffer when a request comes in", func() { + http.Get(s.URL() + "/foo") + Expect(buf).Should(gbytes.Say("GHTTP Received Request: GET - /foo\n")) + + http.Post(s.URL()+"/bar", "", nil) + Expect(buf).Should(gbytes.Say("GHTTP Received Request: POST - /bar\n")) + }) + }) + + Describe("Request Handlers", func() { + Describe("VerifyRequest", func() { + BeforeEach(func() { + s.AppendHandlers(VerifyRequest("GET", "/foo")) + }) + + It("should verify the method, path", func() { + resp, err = http.Get(s.URL() + "/foo?baz=bar") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the method, path", func() { + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo2") + }) + Expect(failures).Should(HaveLen(1)) + }) + + It("should verify the method, path", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/json", nil) + }) + Expect(failures).Should(HaveLen(1)) + }) + + Context("when passed a rawQuery", func() { + It("should also be possible to verify the rawQuery", func() { + s.SetHandler(0, VerifyRequest("GET", "/foo", "baz=bar")) + resp, err = http.Get(s.URL() + "/foo?baz=bar") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should match regardless of query parameter ordering", func() { + s.SetHandler(0, VerifyRequest("GET", "/foo", "type=get&name=money")) + u, _ := url.Parse(s.URL() + "/foo") + u.RawQuery = url.Values{ + "type": []string{"get"}, + "name": []string{"money"}, + }.Encode() + + resp, err = http.Get(u.String()) + Expect(err).ShouldNot(HaveOccurred()) + }) + }) + + Context("when passed a matcher for path", func() { + It("should apply the matcher", func() { + s.SetHandler(0, VerifyRequest("GET", MatchRegexp(`/foo/[a-f]*/3`))) + resp, err = http.Get(s.URL() + "/foo/abcdefa/3") + Expect(err).ShouldNot(HaveOccurred()) + }) + }) + }) + + Describe("VerifyContentType", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyContentType("application/octet-stream"), + )) + }) + + It("should verify the content type", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err = http.DefaultClient.Do(req) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the content type", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.Header.Set("Content-Type", "application/json") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Describe("Verify BasicAuth", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyBasicAuth("bob", "password"), + )) + }) + + It("should verify basic auth", 
func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.SetBasicAuth("bob", "password") + + resp, err = http.DefaultClient.Do(req) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify basic auth", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.SetBasicAuth("bob", "bassword") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Expect(failures).Should(HaveLen(1)) + }) + + It("should require basic auth header", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Expect(failures).Should(ContainElement(ContainSubstring("Authorization header must be specified"))) + }) + }) + + Describe("VerifyHeader", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyHeader(http.Header{ + "accept": []string{"jpeg", "png"}, + "cache-control": []string{"omicron"}, + "Return-Path": []string{"hobbiton"}, + }), + )) + }) + + It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.Header.Add("Accept", "jpeg") + req.Header.Add("Accept", "png") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + resp, err = http.DefaultClient.Do(req) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.Header.Add("Schmaccept", "jpeg") + req.Header.Add("Schmaccept", "png") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyHeaderKV", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyHeaderKV("accept", "jpeg", "png"), + VerifyHeaderKV("cache-control", "omicron"), + VerifyHeaderKV("Return-Path", "hobbiton"), + )) + }) + + It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.Header.Add("Accept", "jpeg") + req.Header.Add("Accept", "png") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + resp, err = http.DefaultClient.Do(req) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Expect(err).ShouldNot(HaveOccurred()) + req.Header.Add("Accept", "jpeg") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyBody", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyBody([]byte("some body")), + )) + }) + + It("should verify the body", func() { + resp, err = http.Post(s.URL()+"/foo", "", bytes.NewReader([]byte("some body"))) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the body", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "", bytes.NewReader([]byte("wrong 
body"))) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyJSON", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyJSON(`{"a":3, "b":2}`), + )) + }) + + It("should verify the json body and the content type", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":3}`))) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the json body and the content type", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":4}`))) + }) + Expect(failures).Should(HaveLen(1)) + }) + + It("should verify the json body and the content type", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/not-json", bytes.NewReader([]byte(`{"b":2, "a":3}`))) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyJSONRepresenting", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyJSONRepresenting([]int{1, 3, 5}), + )) + }) + + It("should verify the json body and the content type", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3,5]`))) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the json body and the content type", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3]`))) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyForm", func() { + var formValues url.Values + + BeforeEach(func() { + formValues = make(url.Values) + formValues.Add("users", "user1") + formValues.Add("users", "user2") + formValues.Add("group", "users") + }) + + Context("when encoded in the URL", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyForm(url.Values{ + "users": []string{"user1", "user2"}, + "group": []string{"users"}, + }), + )) + }) + + It("should verify form values", func() { + resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode()) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should ignore extra values", func() { + formValues.Add("extra", "value") + resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode()) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("fail on missing values", func() { + formValues.Del("group") + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode()) + }) + Expect(failures).Should(HaveLen(1)) + }) + + It("fail on incorrect values", func() { + formValues.Set("group", "wheel") + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL() + "/foo?" 
+ formValues.Encode()) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Context("when present in the body", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyForm(url.Values{ + "users": []string{"user1", "user2"}, + "group": []string{"users"}, + }), + )) + }) + + It("should verify form values", func() { + resp, err = http.PostForm(s.URL()+"/foo", formValues) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should ignore extra values", func() { + formValues.Add("extra", "value") + resp, err = http.PostForm(s.URL()+"/foo", formValues) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("fail on missing values", func() { + formValues.Del("group") + failures := InterceptGomegaFailures(func() { + resp, err = http.PostForm(s.URL()+"/foo", formValues) + }) + Expect(failures).Should(HaveLen(1)) + }) + + It("fail on incorrect values", func() { + formValues.Set("group", "wheel") + failures := InterceptGomegaFailures(func() { + resp, err = http.PostForm(s.URL()+"/foo", formValues) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + }) + + Describe("VerifyFormKV", func() { + Context("when encoded in the URL", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyFormKV("users", "user1", "user2"), + )) + }) + + It("verifies the form value", func() { + resp, err = http.Get(s.URL() + "/foo?users=user1&users=user2") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("verifies the form value", func() { + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL() + "/foo?users=user1") + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Context("when present in the body", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyFormKV("users", "user1", "user2"), + )) + }) + + It("verifies the form value", func() { + resp, err = http.PostForm(s.URL()+"/foo", url.Values{"users": []string{"user1", "user2"}}) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("verifies the form value", func() { + failures := InterceptGomegaFailures(func() { + resp, err = http.PostForm(s.URL()+"/foo", url.Values{"users": []string{"user1"}}) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + }) + + Describe("VerifyProtoRepresenting", func() { + var message *protobuf.SimpleMessage + + BeforeEach(func() { + message = new(protobuf.SimpleMessage) + message.Description = proto.String("A description") + message.Id = proto.Int32(0) + + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/proto"), + VerifyProtoRepresenting(message), + )) + }) + + It("verifies the proto body and the content type", func() { + serialized, err := proto.Marshal(message) + Expect(err).ShouldNot(HaveOccurred()) + + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", bytes.NewReader(serialized)) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the proto body and the content type", func() { + serialized, err := proto.Marshal(&protobuf.SimpleMessage{ + Description: proto.String("A description"), + Id: proto.Int32(0), + Metadata: proto.String("some metadata"), + }) + Expect(err).ShouldNot(HaveOccurred()) + + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/proto", "application/x-protobuf", bytes.NewReader(serialized)) + }) + Expect(failures).Should(HaveLen(1)) + }) + + It("should verify the proto body and the content type", func() { + serialized, err := proto.Marshal(message) + 
Expect(err).ShouldNot(HaveOccurred()) + + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/proto", "application/not-x-protobuf", bytes.NewReader(serialized)) + }) + Expect(failures).Should(HaveLen(1)) + }) + }) + + Describe("RespondWith", func() { + Context("without headers", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWith(http.StatusCreated, "sweet"), + ), CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWith(http.StatusOK, []byte("sour")), + )) + }) + + It("should return the response", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(Equal([]byte("sweet"))) + + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + body, err = ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(Equal([]byte("sour"))) + }) + }) + + Context("with headers", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWith(http.StatusCreated, "sweet", http.Header{"X-Custom-Header": []string{"my header"}}), + )) + }) + + It("should return the headers too", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusCreated)) + Expect(ioutil.ReadAll(resp.Body)).Should(Equal([]byte("sweet"))) + Expect(resp.Header.Get("X-Custom-Header")).Should(Equal("my header")) + }) + }) + }) + + Describe("RespondWithPtr", func() { + var code int + var byteBody []byte + var stringBody string + BeforeEach(func() { + code = http.StatusOK + byteBody = []byte("sweet") + stringBody = "sour" + + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithPtr(&code, &byteBody), + ), CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithPtr(&code, &stringBody), + )) + }) + + It("should return the response", func() { + code = http.StatusCreated + byteBody = []byte("tasty") + stringBody = "treat" + + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(Equal([]byte("tasty"))) + + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err = ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(Equal([]byte("treat"))) + }) + + Context("when passed a nil body", func() { + BeforeEach(func() { + s.SetHandler(0, CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithPtr(&code, nil), + )) + }) + + It("should return an empty body and not explode", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + + Expect(err).ShouldNot(HaveOccurred()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + body, err := ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(BeEmpty()) + + Expect(s.ReceivedRequests()).Should(HaveLen(1)) + }) + }) + }) + + Describe("RespondWithJSON", func() 
{ + Context("when no optional headers are set", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}), + )) + }) + + It("should return the response", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(MatchJSON("[1,2,3]")) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + }) + + Context("when optional headers are set", func() { + var headers http.Header + BeforeEach(func() { + headers = http.Header{"Stuff": []string{"things"}} + }) + + JustBeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}, headers), + )) + }) + + It("should preserve those headers", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Stuff"]).Should(Equal([]string{"things"})) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + + Context("when setting the Content-Type explicitly", func() { + BeforeEach(func() { + headers["Content-Type"] = []string{"not-json"} + }) + + It("should use the Content-Type header that was explicitly set", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"not-json"})) + }) + }) + }) + }) + + Describe("RespondWithJSONPtr", func() { + type testObject struct { + Key string + Value string + } + + var code int + var object testObject + + Context("when no optional headers are set", func() { + BeforeEach(func() { + code = http.StatusOK + object = testObject{} + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncodedPtr(&code, &object), + )) + }) + + It("should return the response", func() { + code = http.StatusCreated + object = testObject{ + Key: "Jim", + Value: "Codes", + } + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Expect(err).ShouldNot(HaveOccurred()) + Expect(body).Should(MatchJSON(`{"Key": "Jim", "Value": "Codes"}`)) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + }) + + Context("when optional headers are set", func() { + var headers http.Header + BeforeEach(func() { + headers = http.Header{"Stuff": []string{"things"}} + }) + + JustBeforeEach(func() { + code = http.StatusOK + object = testObject{} + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncodedPtr(&code, 
&object, headers), + )) + }) + + It("should preserve those headers", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Stuff"]).Should(Equal([]string{"things"})) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + + Context("when setting the Content-Type explicitly", func() { + BeforeEach(func() { + headers["Content-Type"] = []string{"not-json"} + }) + + It("should use the Content-Type header that was explicitly set", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"not-json"})) + }) + }) + }) + }) + + Describe("RespondWithProto", func() { + var message *protobuf.SimpleMessage + + BeforeEach(func() { + message = new(protobuf.SimpleMessage) + message.Description = proto.String("A description") + message.Id = proto.Int32(99) + }) + + Context("when no optional headers are set", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/proto"), + RespondWithProto(http.StatusCreated, message), + )) + }) + + It("should return the response", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.StatusCode).Should(Equal(http.StatusCreated)) + + var received protobuf.SimpleMessage + body, err := ioutil.ReadAll(resp.Body) + err = proto.Unmarshal(body, &received) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should set the Content-Type header to application/x-protobuf", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/x-protobuf"})) + }) + }) + + Context("when optional headers are set", func() { + var headers http.Header + BeforeEach(func() { + headers = http.Header{"Stuff": []string{"things"}} + }) + + JustBeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/proto"), + RespondWithProto(http.StatusCreated, message, headers), + )) + }) + + It("should preserve those headers", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Stuff"]).Should(Equal([]string{"things"})) + }) + + It("should set the Content-Type header to application/x-protobuf", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"application/x-protobuf"})) + }) + + Context("when setting the Content-Type explicitly", func() { + BeforeEach(func() { + headers["Content-Type"] = []string{"not-x-protobuf"} + }) + + It("should use the Content-Type header that was explicitly set", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(resp.Header["Content-Type"]).Should(Equal([]string{"not-x-protobuf"})) + }) + }) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go new file mode 100644 index 000000000..6c75d78bf --- /dev/null +++ 
b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -0,0 +1,396 @@ +/* +Gomega is the Ginkgo BDD-style testing framework's preferred matcher library. + +The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/ + +Gomega on Github: http://github.com/onsi/gomega + +Learn more about Ginkgo online: http://onsi.github.io/ginkgo + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Gomega is MIT-Licensed +*/ +package gomega + +import ( + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/asyncassertion" + "github.com/onsi/gomega/internal/testingtsupport" + "github.com/onsi/gomega/types" +) + +const GOMEGA_VERSION = "1.4.0" + +const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. +If you're using Ginkgo then you probably forgot to put your assertion in an It(). +Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT(). +Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghttp, gexec,...) from different locations. +` + +var globalFailHandler types.GomegaFailHandler + +var defaultEventuallyTimeout = time.Second +var defaultEventuallyPollingInterval = 10 * time.Millisecond +var defaultConsistentlyDuration = 100 * time.Millisecond +var defaultConsistentlyPollingInterval = 10 * time.Millisecond + +//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails +//the fail handler passed into RegisterFailHandler is called. +func RegisterFailHandler(handler types.GomegaFailHandler) { + globalFailHandler = handler +} + +//RegisterTestingT connects Gomega to Golang's XUnit style +//testing.T tests. It is now deprecated and you should use NewGomegaWithT() instead. +// +//Legacy Documentation: +// +//You'll need to call this at the top of each XUnit style test: +// +// func TestFarmHasCow(t *testing.T) { +// RegisterTestingT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +// +// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to +// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests +// in parallel as the global fail handler cannot point to more than one testing.T at a time. +// +// NewGomegaWithT() does not have this limitation +// +// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). +func RegisterTestingT(t types.GomegaTestingT) { + RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t)) +} + +//InterceptGomegaFailures runs a given callback and returns an array of +//failure messages generated by any Gomega assertions within the callback. +// +//This is accomplished by temporarily replacing the *global* fail handler +//with a fail handler that simply annotates failures. The original fail handler +//is reset when InterceptGomegaFailures returns. +// +//This is most useful when testing custom matchers, but can also be used to check +//on a value using a Gomega assertion without causing a test failure. 
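+// +//A minimal usage sketch (illustrative, not part of the upstream godoc): an assertion that always fails yields exactly one captured failure message: +// failures := InterceptGomegaFailures(func() { +// Expect(1).To(Equal(2)) +// }) +// Expect(failures).To(HaveLen(1))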
+func InterceptGomegaFailures(f func()) []string { + originalHandler := globalFailHandler + failures := []string{} + RegisterFailHandler(func(message string, callerSkip ...int) { + failures = append(failures, message) + }) + f() + RegisterFailHandler(originalHandler) + return failures +} + +//Ω wraps an actual value allowing assertions to be made on it: +// Ω("foo").Should(Equal("foo")) +// +//If Ω is passed more than one argument it will pass the *first* argument to the matcher. +//All subsequent arguments will be required to be nil/zero. +// +//This is convenient if you want to make an assertion on a method/function that returns +//a value and an error - a common pattern in Go. +// +//For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +//Then: +// Ω(MyAmazingThing()).Should(Equal(3)) +//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +//Ω and Expect are identical +func Ω(actual interface{}, extra ...interface{}) GomegaAssertion { + return ExpectWithOffset(0, actual, extra...) +} + +//Expect wraps an actual value allowing assertions to be made on it: +// Expect("foo").To(Equal("foo")) +// +//If Expect is passed more than one argument it will pass the *first* argument to the matcher. +//All subsequent arguments will be required to be nil/zero. +// +//This is convenient if you want to make an assertion on a method/function that returns +//a value and an error - a common pattern in Go. +// +//For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +//Then: +// Expect(MyAmazingThing()).Should(Equal(3)) +//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +//Expect and Ω are identical +func Expect(actual interface{}, extra ...interface{}) GomegaAssertion { + return ExpectWithOffset(0, actual, extra...) +} + +//ExpectWithOffset wraps an actual value allowing assertions to be made on it: +// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument; +//this is used to modify the call-stack offset when computing line numbers. +// +//This is most useful in helper functions that make assertions. If you want Gomega's +//error message to refer to the calling line in the test (as opposed to the line in the helper function) +//set the first argument of `ExpectWithOffset` appropriately. +func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + return assertion.New(actual, globalFailHandler, offset, extra...) +} + +//Eventually wraps an actual value allowing assertions to be made on it. +//The assertion is tried periodically until it passes or a timeout occurs. +// +//Both the timeout and polling interval are configurable as optional arguments: +//The first optional argument is the timeout +//The second optional argument is the polling interval +// +//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +//last case they are interpreted as seconds. +// +//If Eventually is passed an actual that is a function taking no arguments and returning at least one value, +//then Eventually will call the function periodically and try the matcher against the function's first return value. 
+// +//Example: +// +// Eventually(func() int { +// return thingImPolling.Count() +// }).Should(BeNumerically(">=", 17)) +// +//Note that this example could be rewritten: +// +// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) +// +//If the function returns more than one value, then Eventually will pass the first value to the matcher and +//assert that all other values are nil/zero. +//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. +// +//For example, consider a method that returns a value and an error: +// func FetchFromDB() (string, error) +// +//Then +// Eventually(FetchFromDB).Should(Equal("hasselhoff")) +// +//Will pass only if the returned error is nil and the returned string passes the matcher. +// +//Eventually's default timeout is 1 second, and its default polling interval is 10ms +func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + return EventuallyWithOffset(0, actual, intervals...) +} + +//EventuallyWithOffset operates like Eventually but takes an additional +//initial argument to indicate an offset in the call stack. This is useful when building helper +//functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) +} + +//Consistently wraps an actual value allowing assertions to be made on it. +//The assertion is tried periodically and is required to pass for a period of time. +// +//Both the total time and polling interval are configurable as optional arguments: +//The first optional argument is the duration that Consistently will run for +//The second optional argument is the polling interval +// +//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +//last case they are interpreted as seconds. +// +//If Consistently is passed an actual that is a function taking no arguments and returning at least one value, +//then Consistently will call the function periodically and try the matcher against the function's first return value. +// +//If the function returns more than one value, then Consistently will pass the first value to the matcher and +//assert that all other values are nil/zero. +//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. +// +//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time. +//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: +// +// Consistently(channel).ShouldNot(Receive()) +// +//Consistently's default duration is 100ms, and its default polling interval is 10ms +func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + return ConsistentlyWithOffset(0, actual, intervals...) 
+} + +//ConsistentlyWithOffset operates like Consistently but takes an additional +//initial argument to indicate an offset in the call stack. This is useful when building helper +//functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) +} + +//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. +func SetDefaultEventuallyTimeout(t time.Duration) { + defaultEventuallyTimeout = t +} + +//Set the default polling interval for Eventually. +func SetDefaultEventuallyPollingInterval(t time.Duration) { + defaultEventuallyPollingInterval = t +} + +//Set the default duration for Consistently. Consistently will verify that your condition is satisfied for this long. +func SetDefaultConsistentlyDuration(t time.Duration) { + defaultConsistentlyDuration = t +} + +//Set the default polling interval for Consistently. +func SetDefaultConsistentlyPollingInterval(t time.Duration) { + defaultConsistentlyPollingInterval = t +} + +//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against +//the matcher passed to the Should and ShouldNot methods. +// +//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to +//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more +//descriptive +// +//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. +// +//Example: +// +// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") +type GomegaAsyncAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher +//passed to the Should/ShouldNot and To/ToNot/NotTo methods. +// +//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect +//though this is not enforced. +// +//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() +//and is used to annotate failure messages. +// +//All methods return a bool that is true if the assertion passed and false if it failed. 
+// +//Example: +// +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +type GomegaAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + + To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it +type OmegaMatcher types.GomegaMatcher + +//GomegaWithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage +//Gomega's rich ecosystem of matchers in standard `testing` test suites. +// +//Use `NewGomegaWithT` to instantiate a `GomegaWithT` +type GomegaWithT struct { + t types.GomegaTestingT +} + +//NewGomegaWithT takes a *testing.T and returns a `GomegaWithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with +//Gomega's rich ecosystem of matchers in standard `testing` test suites. +// +// func TestFarmHasCow(t *testing.T) { +// g := NewGomegaWithT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { + return &GomegaWithT{ + t: t, + } +} + +//See documentation for Expect +func (g *GomegaWithT) Expect(actual interface{}, extra ...interface{}) GomegaAssertion { + return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailHandler(g.t), 0, extra...) +} + +//See documentation for Eventually +func (g *GomegaWithT) Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailHandler(g.t), timeoutInterval, pollingInterval, 0) +} + +//See documentation for Consistently +func (g *GomegaWithT) Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailHandler(g.t), timeoutInterval, pollingInterval, 0) +} + +func toDuration(input interface{}) time.Duration { + duration, ok := input.(time.Duration) + if ok { + return duration + } + + value := reflect.ValueOf(input) + kind := reflect.TypeOf(input).Kind() + + if reflect.Int <= kind && kind <= reflect.Int64 { + return time.Duration(value.Int()) * time.Second + } else if reflect.Uint <= kind && kind <= reflect.Uint64 { + return time.Duration(value.Uint()) * time.Second + } else if reflect.Float32 <= kind && kind <= reflect.Float64 { + return time.Duration(value.Float() * float64(time.Second)) + } else if reflect.String == kind { + duration, err := time.ParseDuration(value.String()) + if err != nil { + 
panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + } + return duration + } + + panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) +} diff --git a/vendor/github.com/onsi/gomega/gstruct/elements.go b/vendor/github.com/onsi/gomega/gstruct/elements.go new file mode 100644 index 000000000..a315fa139 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/elements.go @@ -0,0 +1,145 @@ +package gstruct + +import ( + "errors" + "fmt" + "reflect" + "runtime/debug" + + "github.com/onsi/gomega/format" + errorsutil "github.com/onsi/gomega/gstruct/errors" + "github.com/onsi/gomega/types" +) + +//MatchAllElements succeeds if every element of a slice matches the element matcher it maps to +//through the id function, and every element matcher is matched. +// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// })) +func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + } +} + +//MatchElements succeeds if each element of a slice matches the element matcher it maps to +//through the id function. It can ignore extra elements and/or missing elements. +// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing|IgnoreExtras, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// })) +func MatchElements(identifier Identifier, options Options, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + AllowDuplicates: options&AllowDuplicates != 0, + } +} + +// ElementsMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped +// by the Identifier function. +// TODO: Extend this to work with arrays & maps (map the key) as well. +type ElementsMatcher struct { + // Matchers for each element. + Elements Elements + // Function mapping an element to the string key identifying its matcher. + Identifier Identifier + + // Whether to ignore extra elements or consider it an error. + IgnoreExtras bool + // Whether to ignore missing elements or consider it an error. + IgnoreMissing bool + // Whether to allow duplicates when matching IDs. + AllowDuplicates bool + + // State. + failures []error +} + +// Element ID to matcher. +type Elements map[string]types.GomegaMatcher + +// Function for identifying (mapping) elements. +type Identifier func(element interface{}) string + +func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual).Kind() != reflect.Slice { + return false, fmt.Errorf("%v is type %T, expected slice", actual, actual) + } + + m.failures = m.matchElements(actual) + if len(m.failures) > 0 { + return false, nil + } + return true, nil +} + +func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) { + // Provide more useful error messages in the case of a panic. 
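+ // The deferred recover below captures the panic value and its stack trace and reports them as an ordinary match failure (a panic can arise from the Identifier func or from reflect).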
+ defer func() { + if err := recover(); err != nil { + errs = append(errs, fmt.Errorf("panic checking %+v: %v\n%s", actual, err, debug.Stack())) + } + }() + + val := reflect.ValueOf(actual) + elements := map[string]bool{} + for i := 0; i < val.Len(); i++ { + element := val.Index(i).Interface() + id := m.Identifier(element) + if elements[id] { + if !m.AllowDuplicates { + errs = append(errs, fmt.Errorf("found duplicate element ID %s", id)) + continue + } + } + elements[id] = true + + matcher, expected := m.Elements[id] + if !expected { + if !m.IgnoreExtras { + errs = append(errs, fmt.Errorf("unexpected element %s", id)) + } + continue + } + + match, err := matcher.Match(element) + if match { + continue + } + + if err == nil { + if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { + err = errorsutil.AggregateError(nesting.Failures()) + } else { + err = errors.New(matcher.FailureMessage(element)) + } + } + errs = append(errs, errorsutil.Nest(fmt.Sprintf("[%s]", id), err)) + } + + for id := range m.Elements { + if !elements[id] && !m.IgnoreMissing { + errs = append(errs, fmt.Errorf("missing expected element %s", id)) + } + } + + return errs +} + +func (m *ElementsMatcher) FailureMessage(actual interface{}) (message string) { + failure := errorsutil.AggregateError(m.failures) + return format.Message(actual, fmt.Sprintf("to match elements: %v", failure)) +} + +func (m *ElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match elements") +} + +func (m *ElementsMatcher) Failures() []error { + return m.failures +} diff --git a/vendor/github.com/onsi/gomega/gstruct/elements_test.go b/vendor/github.com/onsi/gomega/gstruct/elements_test.go new file mode 100644 index 000000000..355d463eb --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/elements_test.go @@ -0,0 +1,144 @@ +package gstruct_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" +) + +var _ = Describe("Slice", func() { + allElements := []string{"a", "b"} + missingElements := []string{"a"} + extraElements := []string{"a", "b", "c"} + duplicateElements := []string{"a", "a", "b"} + empty := []string{} + var nils []string + + It("should strictly match all elements", func() { + m := MatchAllElements(id, Elements{ + "b": Equal("b"), + "a": Equal("a"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(missingElements).ShouldNot(m, "should fail with missing elements") + Expect(extraElements).ShouldNot(m, "should fail with extra elements") + Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Expect(nils).ShouldNot(m, "should fail with an uninitialized slice") + + m = MatchAllElements(id, Elements{ + "a": Equal("a"), + "b": Equal("fail"), + }) + Expect(allElements).ShouldNot(m, "should run nested matchers") + + m = MatchAllElements(id, Elements{}) + Expect(empty).Should(m, "should handle empty slices") + Expect(allElements).ShouldNot(m, "should handle only empty slices") + Expect(nils).Should(m, "should handle nil slices") + }) + + It("should ignore extra elements", func() { + m := MatchElements(id, IgnoreExtras, Elements{ + "b": Equal("b"), + "a": Equal("a"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(missingElements).ShouldNot(m, "should fail with missing elements") + Expect(extraElements).Should(m, "should ignore extra elements") + Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Expect(nils).ShouldNot(m, "should fail with an uninitialized slice") + }) + + It("should ignore missing elements", func() { + m := MatchElements(id, IgnoreMissing, Elements{ + "a": Equal("a"), + "b": Equal("b"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(missingElements).Should(m, "should ignore missing elements") + Expect(extraElements).ShouldNot(m, "should fail with extra elements") + Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Expect(nils).Should(m, "should ignore an uninitialized slice") + }) + + It("should ignore missing and extra elements", func() { + m := MatchElements(id, IgnoreMissing|IgnoreExtras, Elements{ + "a": Equal("a"), + "b": Equal("b"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(missingElements).Should(m, "should ignore missing elements") + Expect(extraElements).Should(m, "should ignore extra elements") + Expect(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Expect(nils).Should(m, "should ignore an uninitialized slice") + + m = MatchElements(id, IgnoreExtras|IgnoreMissing, Elements{ + "a": Equal("a"), + "b": Equal("fail"), + }) + Expect(allElements).ShouldNot(m, "should run nested matchers") + }) + + Context("with elements that share a key", func() { + nonUniqueID := func(element interface{}) string { + return element.(string)[0:1] + } + + allElements := []string{"a123", "a213", "b321"} + includingBadElements := []string{"a123", "b123", "b5555"} + extraElements := []string{"a123", "b1234", "c345"} + missingElements := []string{"b123", "b1234", "b1345"} + + It("should strictly allow multiple matches", func() { + m := MatchElements(nonUniqueID, AllowDuplicates, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + 
Expect(extraElements).ShouldNot(m, "should reject with extra keys") + Expect(missingElements).ShouldNot(m, "should reject with missing keys") + Expect(nils).ShouldNot(m, "should fail with an uninitialized slice") + }) + + It("should ignore missing", func() { + m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreMissing, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + Expect(extraElements).ShouldNot(m, "should reject with extra keys") + Expect(missingElements).Should(m, "should allow missing keys") + Expect(nils).Should(m, "should allow an uninitialized slice") + }) + + It("should ignore extras", func() { + m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreExtras, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + Expect(extraElements).Should(m, "should allow extra keys") + Expect(missingElements).ShouldNot(m, "should reject missing keys") + Expect(nils).ShouldNot(m, "should reject an uninitialized slice") + }) + + It("should ignore missing and extras", func() { + m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreExtras|IgnoreMissing, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Expect(allElements).Should(m, "should match all elements") + Expect(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + Expect(extraElements).Should(m, "should allow extra keys") + Expect(missingElements).Should(m, "should allow missing keys") + Expect(nils).Should(m, "should allow an uninitialized slice") + }) + }) +}) + +func id(element interface{}) string { + return element.(string) +} diff --git a/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go new file mode 100644 index 000000000..188492b21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go @@ -0,0 +1,72 @@ +package errors + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/types" +) + +// A stateful matcher that nests other matchers within it and preserves the error types of the +// nested matcher failures. +type NestingMatcher interface { + types.GomegaMatcher + + // Returns the failures of nested matchers. + Failures() []error +} + +// An error type for labeling errors on deeply nested matchers. +type NestedError struct { + Path string + Err error +} + +func (e *NestedError) Error() string { + // Indent Errors. + indented := strings.Replace(e.Err.Error(), "\n", "\n\t", -1) + return fmt.Sprintf("%s:\n\t%v", e.Path, indented) +} + +// Create a NestedError with the given path. +// If err is a NestedError, prepend the path to it. +// If err is an AggregateError, recursively Nest each error. +func Nest(path string, err error) error { + if ag, ok := err.(AggregateError); ok { + var errs AggregateError + for _, e := range ag { + errs = append(errs, Nest(path, e)) + } + return errs + } + if ne, ok := err.(*NestedError); ok { + return &NestedError{ + Path: path + ne.Path, + Err: ne.Err, + } + } + return &NestedError{ + Path: path, + Err: err, + } +} + +// An error type for treating multiple errors as a single error. +type AggregateError []error + +// Error is part of the error interface. 
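+// A single wrapped error renders as itself; multiple errors render as a bracketed, comma-separated list, e.g. "[first error, second error]".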
+func (err AggregateError) Error() string { + if len(err) == 0 { + // This should never happen, really. + return "" + } + if len(err) == 1 { + return err[0].Error() + } + result := fmt.Sprintf("[%s", err[0].Error()) + for i := 1; i < len(err); i++ { + result += fmt.Sprintf(", %s", err[i].Error()) + } + result += "]" + return result +} diff --git a/vendor/github.com/onsi/gomega/gstruct/fields.go b/vendor/github.com/onsi/gomega/gstruct/fields.go new file mode 100644 index 000000000..0020b873d --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/fields.go @@ -0,0 +1,141 @@ +package gstruct + +import ( + "errors" + "fmt" + "reflect" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/format" + errorsutil "github.com/onsi/gomega/gstruct/errors" + "github.com/onsi/gomega/types" +) + +//MatchAllFields succeeds if every field of a struct matches the field matcher associated with +//it, and every field matcher is matched. +// Expect(struct{ A, B string }{"a", "b"}).To(MatchAllFields(gstruct.Fields{ +// "A": Equal("a"), +// "B": Equal("b"), +// })) +func MatchAllFields(fields Fields) types.GomegaMatcher { + return &FieldsMatcher{ + Fields: fields, + } +} + +//MatchFields succeeds if each field of a struct matches the field matcher associated with +//it. It can ignore extra fields and/or missing fields. +// Expect(struct{ A, C string }{"a", "c"}).To(MatchFields(IgnoreMissing|IgnoreExtras, gstruct.Fields{ +// "A": Equal("a"), +// "B": Equal("b"), +// })) +func MatchFields(options Options, fields Fields) types.GomegaMatcher { + return &FieldsMatcher{ + Fields: fields, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + } +} + +type FieldsMatcher struct { + // Matchers for each field. + Fields Fields + + // Whether to ignore extra fields or consider it an error. + IgnoreExtras bool + // Whether to ignore missing fields or consider it an error. + IgnoreMissing bool + + // State. + failures []error +} + +// Field name to matcher. +type Fields map[string]types.GomegaMatcher + +func (m *FieldsMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual).Kind() != reflect.Struct { + return false, fmt.Errorf("%v is type %T, expected struct", actual, actual) + } + + m.failures = m.matchFields(actual) + if len(m.failures) > 0 { + return false, nil + } + return true, nil +} + +func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) { + val := reflect.ValueOf(actual) + typ := val.Type() + fields := map[string]bool{} + for i := 0; i < val.NumField(); i++ { + fieldName := typ.Field(i).Name + fields[fieldName] = true + + err := func() (err error) { + // This test relies heavily on reflect, which tends to panic. + // Recover here to provide more useful error messages in that case. 
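+ // The recovered panic value and its stack trace become this field's error, so the failure message points at the offending field.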
+ defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack()) + } + }() + + matcher, expected := m.Fields[fieldName] + if !expected { + if !m.IgnoreExtras { + return fmt.Errorf("unexpected field %s: %+v", fieldName, actual) + } + return nil + } + + var field interface{} + if val.Field(i).IsValid() { + field = val.Field(i).Interface() + } else { + field = reflect.Zero(typ.Field(i).Type) + } + + match, err := matcher.Match(field) + if err != nil { + return err + } else if !match { + if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { + return errorsutil.AggregateError(nesting.Failures()) + } + return errors.New(matcher.FailureMessage(field)) + } + return nil + }() + if err != nil { + errs = append(errs, errorsutil.Nest("."+fieldName, err)) + } + } + + for field := range m.Fields { + if !fields[field] && !m.IgnoreMissing { + errs = append(errs, fmt.Errorf("missing expected field %s", field)) + } + } + + return errs +} + +func (m *FieldsMatcher) FailureMessage(actual interface{}) (message string) { + failures := make([]string, len(m.failures)) + for i := range m.failures { + failures[i] = m.failures[i].Error() + } + return format.Message(reflect.TypeOf(actual).Name(), + fmt.Sprintf("to match fields: {\n%v\n}\n", strings.Join(failures, "\n"))) +} + +func (m *FieldsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match fields") +} + +func (m *FieldsMatcher) Failures() []error { + return m.failures +} diff --git a/vendor/github.com/onsi/gomega/gstruct/fields_test.go b/vendor/github.com/onsi/gomega/gstruct/fields_test.go new file mode 100644 index 000000000..e4e039e21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/fields_test.go @@ -0,0 +1,76 @@ +package gstruct_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" +) + +var _ = Describe("Struct", func() { + allFields := struct{ A, B string }{"a", "b"} + missingFields := struct{ A string }{"a"} + extraFields := struct{ A, B, C string }{"a", "b", "c"} + emptyFields := struct{ A, B string }{} + + It("should strictly match all fields", func() { + m := MatchAllFields(Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Expect(allFields).Should(m, "should match all fields") + Expect(missingFields).ShouldNot(m, "should fail with missing fields") + Expect(extraFields).ShouldNot(m, "should fail with extra fields") + Expect(emptyFields).ShouldNot(m, "should fail with empty fields") + + m = MatchAllFields(Fields{ + "A": Equal("a"), + "B": Equal("fail"), + }) + Expect(allFields).ShouldNot(m, "should run nested matchers") + }) + + It("should handle empty structs", func() { + m := MatchAllFields(Fields{}) + Expect(struct{}{}).Should(m, "should handle empty structs") + Expect(allFields).ShouldNot(m, "should fail with extra fields") + }) + + It("should ignore missing fields", func() { + m := MatchFields(IgnoreMissing, Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Expect(allFields).Should(m, "should match all fields") + Expect(missingFields).Should(m, "should ignore missing fields") + Expect(extraFields).ShouldNot(m, "should fail with extra fields") + Expect(emptyFields).ShouldNot(m, "should fail with empty fields") + }) + + It("should ignore extra fields", func() { + m := MatchFields(IgnoreExtras, Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Expect(allFields).Should(m, "should match all fields") + Expect(missingFields).ShouldNot(m, "should fail with missing fields") + Expect(extraFields).Should(m, "should ignore extra fields") + Expect(emptyFields).ShouldNot(m, "should fail with empty fields") + }) + + It("should ignore missing and extra fields", func() { + m := MatchFields(IgnoreMissing|IgnoreExtras, Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Expect(allFields).Should(m, "should match all fields") + Expect(missingFields).Should(m, "should ignore missing fields") + Expect(extraFields).Should(m, "should ignore extra fields") + Expect(emptyFields).ShouldNot(m, "should fail with empty fields") + + m = MatchFields(IgnoreMissing|IgnoreExtras, Fields{ + "A": Equal("a"), + "B": Equal("fail"), + }) + Expect(allFields).ShouldNot(m, "should run nested matchers") + }) +}) diff --git a/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go b/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go new file mode 100644 index 000000000..d47566304 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go @@ -0,0 +1,13 @@ +package gstruct_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func Test(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gstruct Suite") +} diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore.go b/vendor/github.com/onsi/gomega/gstruct/ignore.go new file mode 100644 index 000000000..0365f32ad --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/ignore.go @@ -0,0 +1,37 @@ +package gstruct + +import ( + "github.com/onsi/gomega/types" +) + +//Ignore ignores the actual value and always succeeds. +// Expect(nil).To(Ignore()) +// Expect(true).To(Ignore()) +func Ignore() types.GomegaMatcher { + return &IgnoreMatcher{true} +} + +//Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing +//to catch problematic elements, or to verify tests are running. 
+// Expect(nil).NotTo(Reject())
+// Expect(true).NotTo(Reject())
+func Reject() types.GomegaMatcher {
+	return &IgnoreMatcher{false}
+}
+
+// A matcher that either always succeeds or always fails.
+type IgnoreMatcher struct {
+	Succeed bool
+}
+
+func (m *IgnoreMatcher) Match(actual interface{}) (bool, error) {
+	return m.Succeed, nil
+}
+
+func (m *IgnoreMatcher) FailureMessage(_ interface{}) (message string) {
+	return "Unconditional failure"
+}
+
+func (m *IgnoreMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+	return "Unconditional success"
+}
diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore_test.go b/vendor/github.com/onsi/gomega/gstruct/ignore_test.go
new file mode 100644
index 000000000..07775e742
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/ignore_test.go
@@ -0,0 +1,23 @@
+package gstruct_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/gstruct"
+)
+
+var _ = Describe("Ignore", func() {
+	It("should always succeed", func() {
+		Expect(nil).Should(Ignore())
+		Expect(struct{}{}).Should(Ignore())
+		Expect(0).Should(Ignore())
+		Expect(false).Should(Ignore())
+	})
+
+	It("should always fail", func() {
+		Expect(nil).ShouldNot(Reject())
+		Expect(struct{}{}).ShouldNot(Reject())
+		Expect(1).ShouldNot(Reject())
+		Expect(true).ShouldNot(Reject())
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer.go b/vendor/github.com/onsi/gomega/gstruct/pointer.go
new file mode 100644
index 000000000..0a2f35de3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/pointer.go
@@ -0,0 +1,56 @@
+package gstruct
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+	"github.com/onsi/gomega/types"
+)
+
+//PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is
+//nil.
+//	actual := 5
+//	Expect(&actual).To(PointTo(Equal(5)))
func PointTo(matcher types.GomegaMatcher) types.GomegaMatcher {
+	return &PointerMatcher{
+		Matcher: matcher,
+	}
+}
+
+type PointerMatcher struct {
+	Matcher types.GomegaMatcher
+
+	// Failure message.
+	failure string
+}
+
+func (m *PointerMatcher) Match(actual interface{}) (bool, error) {
+	val := reflect.ValueOf(actual)
+
+	// return error if actual type is not a pointer
+	if val.Kind() != reflect.Ptr {
+		return false, fmt.Errorf("PointerMatcher expects a pointer but we have '%s'", val.Kind())
+	}
+
+	if !val.IsValid() || val.IsNil() {
+		m.failure = format.Message(actual, "not to be <nil>")
+		return false, nil
+	}
+
+	// Forward the value.
+	elem := val.Elem().Interface()
+	match, err := m.Matcher.Match(elem)
+	if !match {
+		m.failure = m.Matcher.FailureMessage(elem)
+	}
+	return match, err
+}
+
+func (m *PointerMatcher) FailureMessage(_ interface{}) (message string) {
+	return m.failure
+}
+
+func (m *PointerMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return m.Matcher.NegatedFailureMessage(actual)
+}
diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer_test.go b/vendor/github.com/onsi/gomega/gstruct/pointer_test.go
new file mode 100644
index 000000000..805a92abe
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/pointer_test.go
@@ -0,0 +1,33 @@
+package gstruct_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/gstruct"
+)
+
+var _ = Describe("PointTo", func() {
+	It("should fail when passed nil", func() {
+		var p *struct{}
+		Expect(p).Should(BeNil())
+	})
+
+	It("should succeed when passed non-nil pointer", func() {
+		var s struct{}
+		Expect(&s).Should(PointTo(Ignore()))
+	})
+
+	It("should unwrap the pointee value", func() {
+		i := 1
+		Expect(&i).Should(PointTo(Equal(1)))
+		Expect(&i).ShouldNot(PointTo(Equal(2)))
+	})
+
+	It("should work with nested pointers", func() {
+		i := 1
+		ip := &i
+		ipp := &ip
+		Expect(ipp).Should(PointTo(PointTo(Equal(1))))
+		Expect(ipp).ShouldNot(PointTo(PointTo(Equal(2))))
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/gstruct/types.go b/vendor/github.com/onsi/gomega/gstruct/types.go
new file mode 100644
index 000000000..48cbbe8f6
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/types.go
@@ -0,0 +1,15 @@
+package gstruct
+
+//Options is the type for options passed to some matchers.
+type Options int
+
+const (
+	//IgnoreExtras tells the matcher to ignore extra elements or fields, rather than triggering a failure.
+	IgnoreExtras Options = 1 << iota
+	//IgnoreMissing tells the matcher to ignore missing elements or fields, rather than triggering a failure.
+	IgnoreMissing
+	//AllowDuplicates tells the matcher to permit multiple members of the slice to produce the same ID when
+	//considered by the identifier function. All members that map to a given key must still match successfully
+	//with the matcher that is provided for that key.
+	AllowDuplicates
+)
diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
new file mode 100644
index 000000000..b73673f21
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
@@ -0,0 +1,98 @@
+package assertion
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/types"
+)
+
+type Assertion struct {
+	actualInput interface{}
+	fail        types.GomegaFailHandler
+	offset      int
+	extra       []interface{}
+}
+
+func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion {
+	return &Assertion{
+		actualInput: actualInput,
+		fail:        fail,
+		offset:      offset,
+		extra:       extra,
+	}
+}
+
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + matches, err := matcher.Match(assertion.actualInput) + description := assertion.buildDescription(optionalDescription...) + if err != nil { + assertion.fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(assertion.actualInput) + } else { + message = matcher.NegatedFailureMessage(assertion.actualInput) + } + assertion.fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { + success, message := vetExtras(assertion.extra) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) + assertion.fail(description+message, 2+assertion.offset) + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go new file mode 100644 index 000000000..dae47a48b --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go @@ -0,0 +1,13 @@ +package assertion_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestAssertion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Assertion Suite") +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go new file mode 100644 index 000000000..1334777bd --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go @@ -0,0 +1,252 @@ +package assertion_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/fakematcher" +) + +var _ = Describe("Assertion", func() { + var ( + a *Assertion + failureMessage string + failureCallerSkip int + matcher *fakematcher.FakeMatcher + ) + + input := "The thing I'm testing" + + var fakeFailHandler = func(message string, callerSkip ...int) { + failureMessage = message + if len(callerSkip) == 1 { + failureCallerSkip = callerSkip[0] + } + } + + BeforeEach(func() { + matcher = &fakematcher.FakeMatcher{} + failureMessage = "" + failureCallerSkip = 0 + a = New(input, fakeFailHandler, 1) + }) + + Context("when called", func() { + It("should pass the provided input value to the matcher", func() { + a.Should(matcher) + + Expect(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.ShouldNot(matcher) + + Expect(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.To(matcher) + + Expect(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.ToNot(matcher) + + Expect(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.NotTo(matcher) + + Expect(matcher.ReceivedActual).Should(Equal(input)) + }) + }) + + Context("when the matcher succeeds", func() { + BeforeEach(func() { + matcher.MatchesToReturn = true + matcher.ErrToReturn = nil + }) + + Context("and a positive assertion is being made", func() { + It("should not call the failure callback", func() { + a.Should(matcher) + Expect(failureMessage).Should(Equal("")) + }) + + It("should be true", func() { + Expect(a.Should(matcher)).Should(BeTrue()) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should call the failure callback", func() { + a.ShouldNot(matcher) + Expect(failureMessage).Should(Equal("negative: The thing I'm testing")) + Expect(failureCallerSkip).Should(Equal(3)) + }) + + It("should be false", func() { + Expect(a.ShouldNot(matcher)).Should(BeFalse()) + }) + }) + }) + + Context("when the matcher fails", func() { + BeforeEach(func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + }) + + Context("and a positive assertion is being made", func() { + It("should call the failure callback", func() { + a.Should(matcher) + Expect(failureMessage).Should(Equal("positive: The thing I'm testing")) + Expect(failureCallerSkip).Should(Equal(3)) + }) + + It("should be false", func() { + Expect(a.Should(matcher)).Should(BeFalse()) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should not call the failure callback", func() { + a.ShouldNot(matcher) + Expect(failureMessage).Should(Equal("")) + }) + + It("should be true", func() { + Expect(a.ShouldNot(matcher)).Should(BeTrue()) + }) + }) + }) + + Context("When reporting a failure", func() { + BeforeEach(func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + }) + + Context("and there is an optional description", func() { + It("should append the description to the failure message", func() { + a.Should(matcher, "A description") + Expect(failureMessage).Should(Equal("A description\npositive: The thing I'm testing")) + Expect(failureCallerSkip).Should(Equal(3)) + }) + }) + + Context("and there are multiple arguments to the optional description", func() { + It("should append the formatted description to the failure message", func() { + a.Should(matcher, "A description of [%d]", 3) + Expect(failureMessage).Should(Equal("A description of [3]\npositive: The thing I'm testing")) + 
Expect(failureCallerSkip).Should(Equal(3)) + }) + }) + }) + + Context("When the matcher returns an error", func() { + BeforeEach(func() { + matcher.ErrToReturn = errors.New("Kaboom!") + }) + + Context("and a positive assertion is being made", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = true + a.Should(matcher) + Expect(failureMessage).Should(Equal("Kaboom!")) + Expect(failureCallerSkip).Should(Equal(3)) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = false + a.ShouldNot(matcher) + Expect(failureMessage).Should(Equal("Kaboom!")) + Expect(failureCallerSkip).Should(Equal(3)) + }) + }) + + It("should always be false", func() { + Expect(a.Should(matcher)).Should(BeFalse()) + Expect(a.ShouldNot(matcher)).Should(BeFalse()) + }) + }) + + Context("when there are extra parameters", func() { + It("(a simple example)", func() { + Expect(func() (string, int, error) { + return "foo", 0, nil + }()).Should(Equal("foo")) + }) + + Context("when the parameters are all nil or zero", func() { + It("should invoke the matcher", func() { + matcher.MatchesToReturn = true + matcher.ErrToReturn = nil + + var typedNil []string + a = New(input, fakeFailHandler, 1, 0, nil, typedNil) + + result := a.Should(matcher) + Expect(result).Should(BeTrue()) + Expect(matcher.ReceivedActual).Should(Equal(input)) + + Expect(failureMessage).Should(BeZero()) + }) + }) + + Context("when any of the parameters are not nil or zero", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + + a = New(input, fakeFailHandler, 1, errors.New("foo")) + result := a.Should(matcher) + Expect(result).Should(BeFalse()) + Expect(matcher.ReceivedActual).Should(BeZero(), "The matcher doesn't even get called") + Expect(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 1) + result = a.ShouldNot(matcher) + Expect(result).Should(BeFalse()) + Expect(failureMessage).Should(ContainSubstring("1")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.To(matcher) + Expect(result).Should(BeFalse()) + Expect(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.ToNot(matcher) + Expect(result).Should(BeFalse()) + Expect(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.NotTo(matcher) + Expect(result).Should(BeFalse()) + Expect(failureMessage).Should(ContainSubstring("foo")) + Expect(failureCallerSkip).Should(Equal(3)) + }) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + Expect(true).Should(BeTrue()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go new file mode 100644 index 000000000..bce085300 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -0,0 +1,189 @@ +package asyncassertion + +import ( + "errors" + "fmt" + "reflect" + "time" + + 
"github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + asyncType AsyncAssertionType + actualInput interface{} + timeoutInterval time.Duration + pollingInterval time.Duration + fail types.GomegaFailHandler + offset int +} + +func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + actualType := reflect.TypeOf(actualInput) + if actualType.Kind() == reflect.Func { + if actualType.NumIn() != 0 || actualType.NumOut() == 0 { + panic("Expected a function with no arguments and one or more return values.") + } + } + + return &AsyncAssertion{ + asyncType: asyncType, + actualInput: actualInput, + fail: fail, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + } +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *AsyncAssertion) actualInputIsAFunction() bool { + actualType := reflect.TypeOf(assertion.actualInput) + return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if assertion.actualInputIsAFunction() { + values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) + + extras := []interface{}{} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + + success, message := vetExtras(extras) + + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil + } + + return assertion.actualInput, nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualInputIsAFunction() { + return true + } + + return oraclematcher.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + description := assertion.buildDescription(optionalDescription...) 
+ + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go new file mode 100644 index 000000000..bdb0c3d22 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go @@ -0,0 +1,13 @@ +package asyncassertion_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestAsyncAssertion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "AsyncAssertion Suite") +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go new file mode 100644 index 000000000..13ffae7c5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go @@ -0,0 +1,345 @@ +package asyncassertion_test + +import ( + "errors" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/internal/asyncassertion" +) + +var _ = Describe("Async Assertion", func() { + var ( + failureMessage string + callerSkip int + ) + + var fakeFailHandler = func(message string, skip ...int) { + failureMessage = message + callerSkip = skip[0] + } + + BeforeEach(func() { + failureMessage = "" + callerSkip = 0 + }) + + Describe("Eventually", func() { + Context("the positive case", func() { + It("should poll the function and matcher", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter++ + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically("==", 5)) + Expect(failureMessage).Should(BeZero()) + }) + + It("should continue when the matcher errors", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() interface{} { + counter++ + if counter == 5 { + return "not-a-number" //this should cause the matcher to error + } + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically("==", 5), "My description %d", 2) + + Expect(failureMessage).Should(ContainSubstring("Timed out after")) + Expect(failureMessage).Should(ContainSubstring("My description 2")) + Expect(callerSkip).Should(Equal(4)) + }) + + It("should be able to timeout", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter++ + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically(">", 100), "My description %d", 2) + + Expect(counter).Should(BeNumerically(">", 8)) + Expect(counter).Should(BeNumerically("<=", 10)) + Expect(failureMessage).Should(ContainSubstring("Timed out after")) + Expect(failureMessage).Should(MatchRegexp(`\: \d`), "Should pass the correct value to the matcher message formatter.") + Expect(failureMessage).Should(ContainSubstring("My description 2")) + Expect(callerSkip).Should(Equal(4)) + }) + }) + + Context("the negative case", func() { + It("should poll the function and matcher", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter += 1 + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(BeNumerically("<", 3)) + + Expect(counter).Should(Equal(3)) + Expect(failureMessage).Should(BeZero()) + }) + + It("should timeout when the matcher errors", func() { + a := New(AsyncAssertionTypeEventually, func() interface{} { + return 0 //this should cause the matcher to error + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(HaveLen(0), "My description %d", 2) + + Expect(failureMessage).Should(ContainSubstring("Timed out after")) + Expect(failureMessage).Should(ContainSubstring("Error:")) + Expect(failureMessage).Should(ContainSubstring("My description 2")) + Expect(callerSkip).Should(Equal(4)) + }) + + It("should be able to timeout", func() { + a := New(AsyncAssertionTypeEventually, func() int { + return 0 + }, fakeFailHandler, time.Duration(0.1*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Equal(0), "My description %d", 2) + + Expect(failureMessage).Should(ContainSubstring("Timed out after")) + Expect(failureMessage).Should(ContainSubstring(": 0"), "Should pass the correct value to the matcher message formatter.") + 
Expect(failureMessage).Should(ContainSubstring("My description 2")) + Expect(callerSkip).Should(Equal(4)) + }) + }) + + Context("with a function that returns multiple values", func() { + It("should eventually succeed if the additional arguments are nil", func() { + i := 0 + Eventually(func() (int, error) { + i++ + return i, nil + }).Should(Equal(10)) + }) + + It("should eventually timeout if the additional arguments are not nil", func() { + i := 0 + a := New(AsyncAssertionTypeEventually, func() (int, error) { + i++ + return i, errors.New("bam") + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + a.Should(Equal(2)) + + Expect(failureMessage).Should(ContainSubstring("Timed out after")) + Expect(failureMessage).Should(ContainSubstring("Error:")) + Expect(failureMessage).Should(ContainSubstring("bam")) + Expect(callerSkip).Should(Equal(4)) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + c := make(chan bool, 1) + c <- true + Eventually(c).Should(Receive()) + }) + }) + }) + + Describe("Consistently", func() { + Describe("The positive case", func() { + Context("when the matcher consistently passes for the duration", func() { + It("should pass", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() string { + calls++ + return "foo" + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(Equal("foo")) + Expect(calls).Should(BeNumerically(">", 8)) + Expect(calls).Should(BeNumerically("<=", 10)) + Expect(failureMessage).Should(BeZero()) + }) + }) + + Context("when the matcher fails at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + if calls > 5 { + return "bar" + } + return "foo" + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(Equal("foo")) + Expect(failureMessage).Should(ContainSubstring("to equal")) + Expect(callerSkip).Should(Equal(4)) + }) + }) + + Context("when the matcher errors at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + if calls > 5 { + return 3 + } + return []int{1, 2, 3} + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(HaveLen(3)) + Expect(failureMessage).Should(ContainSubstring("HaveLen matcher expects")) + Expect(callerSkip).Should(Equal(4)) + }) + }) + }) + + Describe("The negative case", func() { + Context("when the matcher consistently passes for the duration", func() { + It("should pass", func() { + c := make(chan bool) + a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Receive()) + Expect(failureMessage).Should(BeZero()) + }) + }) + + Context("when the matcher fails at some point", func() { + It("should fail", func() { + c := make(chan bool) + go func() { + time.Sleep(time.Duration(100 * time.Millisecond)) + c <- true + }() + + a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + 
a.ShouldNot(Receive())
+					Expect(failureMessage).Should(ContainSubstring("not to receive anything"))
+				})
+			})
+
+			Context("when the matcher errors at some point", func() {
+				It("should fail", func() {
+					calls := 0
+					a := New(AsyncAssertionTypeConsistently, func() interface{} {
+						calls++
+						return calls
+					}, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
+
+					a.ShouldNot(BeNumerically(">", 5))
+					Expect(failureMessage).Should(ContainSubstring("not to be >"))
+					Expect(callerSkip).Should(Equal(4))
+				})
+			})
+		})
+
+		Context("with a function that returns multiple values", func() {
+			It("should consistently succeed if the additional arguments are nil", func() {
+				i := 2
+				Consistently(func() (int, error) {
+					i++
+					return i, nil
+				}).Should(BeNumerically(">=", 2))
+			})
+
+			It("should eventually timeout if the additional arguments are not nil", func() {
+				i := 2
+				a := New(AsyncAssertionTypeEventually, func() (int, error) {
+					i++
+					return i, errors.New("bam")
+				}, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1)
+				a.Should(BeNumerically(">=", 2))
+
+				Expect(failureMessage).Should(ContainSubstring("Error:"))
+				Expect(failureMessage).Should(ContainSubstring("bam"))
+				Expect(callerSkip).Should(Equal(4))
+			})
+		})
+
+		Context("Making an assertion without a registered fail handler", func() {
+			It("should panic", func() {
+				defer func() {
+					e := recover()
+					RegisterFailHandler(Fail)
+					if e == nil {
+						Fail("expected a panic to have occurred")
+					}
+				}()
+
+				RegisterFailHandler(nil)
+				c := make(chan bool)
+				Consistently(c).ShouldNot(Receive())
+			})
+		})
+	})
+
+	Context("when passed a function with the wrong # of arguments & returns", func() {
+		It("should panic", func() {
+			Expect(func() {
+				New(AsyncAssertionTypeEventually, func() {}, fakeFailHandler, 0, 0, 1)
+			}).Should(Panic())
+
+			Expect(func() {
+				New(AsyncAssertionTypeEventually, func(a string) int { return 0 }, fakeFailHandler, 0, 0, 1)
+			}).Should(Panic())
+
+			Expect(func() {
+				New(AsyncAssertionTypeEventually, func() int { return 0 }, fakeFailHandler, 0, 0, 1)
+			}).ShouldNot(Panic())
+
+			Expect(func() {
+				New(AsyncAssertionTypeEventually, func() (int, error) { return 0, nil }, fakeFailHandler, 0, 0, 1)
+			}).ShouldNot(Panic())
+		})
+	})
+
+	Describe("bailing early", func() {
+		Context("when actual is a value", func() {
+			It("Eventually should bail out and fail early if the matcher says to", func() {
+				c := make(chan bool)
+				close(c)
+
+				t := time.Now()
+				failures := InterceptGomegaFailures(func() {
+					Eventually(c, 0.1).Should(Receive())
+				})
+				Expect(time.Since(t)).Should(BeNumerically("<", 90*time.Millisecond))
+
+				Expect(failures).Should(HaveLen(1))
+			})
+		})
+
+		Context("when actual is a function", func() {
+			It("should never bail early", func() {
+				c := make(chan bool)
+				close(c)
+
+				t := time.Now()
+				failures := InterceptGomegaFailures(func() {
+					Eventually(func() chan bool {
+						return c
+					}, 0.1).Should(Receive())
+				})
+				Expect(time.Since(t)).Should(BeNumerically(">=", 90*time.Millisecond))
+
+				Expect(failures).Should(HaveLen(1))
+			})
+		})
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go b/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
new file mode 100644
index 000000000..6e351a7de
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
@@ -0,0 +1,23 @@
+package fakematcher
+
+import "fmt"
+
+type FakeMatcher struct {
+	ReceivedActual
interface{} + MatchesToReturn bool + ErrToReturn error +} + +func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) { + matcher.ReceivedActual = actual + + return matcher.MatchesToReturn, matcher.ErrToReturn +} + +func (matcher *FakeMatcher) FailureMessage(actual interface{}) string { + return fmt.Sprintf("positive: %v", actual) +} + +func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string { + return fmt.Sprintf("negative: %v", actual) +} diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go new file mode 100644 index 000000000..66cad88a1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go @@ -0,0 +1,25 @@ +package oraclematcher + +import "github.com/onsi/gomega/types" + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. + +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. +*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go new file mode 100644 index 000000000..ac8912525 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -0,0 +1,40 @@ +package testingtsupport + +import ( + "regexp" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/types" +) + +type gomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler { + return func(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + stackTrace := pruneStack(string(debug.Stack()), skip) + t.Fatalf("\n%s\n%s", stackTrace, message) + } +} + +func pruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go new file mode 100644 index 000000000..9c6ccbdfd --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go @@ -0,0 +1,34 @@ +package testingtsupport_test + +import ( + . "github.com/onsi/gomega" + + "fmt" + "testing" +) + +type FakeT struct { + LastCall string +} + +func (f *FakeT) Fatalf(format string, args ...interface{}) { + f.LastCall = fmt.Sprintf(format, args...) 
+} + +func TestTestingT(t *testing.T) { + RegisterTestingT(t) + Expect(true).Should(BeTrue()) +} + +func TestGomegaWithT(t *testing.T) { + g := NewGomegaWithT(t) + + f := &FakeT{} + testG := NewGomegaWithT(f) + + testG.Expect("foo").To(Equal("foo")) + g.Expect(f.LastCall).To(BeZero()) + + testG.Expect("foo").To(Equal("bar")) + g.Expect(f.LastCall).To(ContainSubstring(": foo")) +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go new file mode 100644 index 000000000..761ec33d8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -0,0 +1,427 @@ +package gomega + +import ( + "time" + + "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +//types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func Equal(expected interface{}) types.GomegaMatcher { + return &matchers.EqualMatcher{ + Expected: expected, + } +} + +//BeEquivalentTo is more lax than Equal, allowing equality between different types. +//This is done by converting actual to have the type of expected before +//attempting equality with reflect.DeepEqual. +//It is an error for actual and expected to be nil. Use BeNil() instead. +func BeEquivalentTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeEquivalentToMatcher{ + Expected: expected, + } +} + +//BeIdenticalTo uses the == operator to compare actual with expected. +//BeIdenticalTo is strict about types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func BeIdenticalTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeIdenticalToMatcher{ + Expected: expected, + } +} + +//BeNil succeeds if actual is nil +func BeNil() types.GomegaMatcher { + return &matchers.BeNilMatcher{} +} + +//BeTrue succeeds if actual is true +func BeTrue() types.GomegaMatcher { + return &matchers.BeTrueMatcher{} +} + +//BeFalse succeeds if actual is false +func BeFalse() types.GomegaMatcher { + return &matchers.BeFalseMatcher{} +} + +//HaveOccurred succeeds if actual is a non-nil error +//The typical Go error checking pattern looks like: +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) +func HaveOccurred() types.GomegaMatcher { + return &matchers.HaveOccurredMatcher{} +} + +//Succeed passes if actual is a nil error +//Succeed is intended to be used with functions that return a single error value. Instead of +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) +// +//You can write: +// Expect(SomethingThatMightFail()).Should(Succeed()) +// +//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +func Succeed() types.GomegaMatcher { + return &matchers.SucceedMatcher{} +} + +//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. 
+//
+//These are valid use-cases:
+//	Expect(err).Should(MatchError("an error"))	//asserts that err.Error() == "an error"
+//	Expect(err).Should(MatchError(SomeError))	//asserts that err == SomeError (via reflect.DeepEqual)
+//
+//It is an error for err to be nil or an object that does not implement the Error interface
+func MatchError(expected interface{}) types.GomegaMatcher {
+	return &matchers.MatchErrorMatcher{
+		Expected: expected,
+	}
+}
+
+//BeClosed succeeds if actual is a closed channel.
+//It is an error to pass a non-channel to BeClosed; it is also an error to pass nil.
+//
+//In order to check whether or not the channel is closed, Gomega must try to read from the channel
+//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
+//values coming down the channel.
+//
+//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
+//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
+//
+//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
+func BeClosed() types.GomegaMatcher {
+	return &matchers.BeClosedMatcher{}
+}
+
+//Receive succeeds if there is a value to be received on actual.
+//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
+//
+//Receive returns immediately and never blocks:
+//
+//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
+//
+//If you have a go-routine running in the background that will write to channel `c` you can:
+//	Eventually(c).Should(Receive())
+//
+//This will time out if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+//
+//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
+//	Consistently(c).ShouldNot(Receive())
+//
+//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
+//	Expect(c).Should(Receive(Equal("foo")))
+//
+//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+//
+//Passing Receive a matcher is especially useful when paired with Eventually:
+//
+//	Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//	var myThing thing
+//	Eventually(thingChan).Should(Receive(&myThing))
+//	Expect(myThing.Sprocket).Should(Equal("foo"))
+//	Expect(myThing.IsValid()).Should(BeTrue())
+func Receive(args ...interface{}) types.GomegaMatcher {
+	var arg interface{}
+	if len(args) > 0 {
+		arg = args[0]
+	}
+
+	return &matchers.ReceiveMatcher{
+		Arg: arg,
+	}
+}
+
+//BeSent succeeds if a value can be sent to actual.
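//For instance (an illustrative sketch): a buffered channel with spare
//capacity is immediately ready to receive, so
//	c := make(chan string, 1)
//	Expect(c).Should(BeSent("foo")) // succeeds, and "foo" is actually sent into c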
+//Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
+//In addition, actual must not be closed.
+//
+//BeSent never blocks:
+//
+//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
+//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
+//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+//
+//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can time out on blocked channel sends.
+func BeSent(arg interface{}) types.GomegaMatcher {
+	return &matchers.BeSentMatcher{
+		Arg: arg,
+	}
+}
+
+//MatchRegexp succeeds if actual is a string or stringer that matches the
+//passed-in regexp. Optional arguments can be provided to construct a regexp
+//via fmt.Sprintf().
+func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.MatchRegexpMatcher{
+		Regexp: regexp,
+		Args:   args,
+	}
+}
+
+//ContainSubstring succeeds if actual is a string or stringer that contains the
+//passed-in substring. Optional arguments can be provided to construct the substring
+//via fmt.Sprintf().
+func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.ContainSubstringMatcher{
+		Substr: substr,
+		Args:   args,
+	}
+}
+
+//HavePrefix succeeds if actual is a string or stringer that contains the
+//passed-in string as a prefix. Optional arguments can be provided to construct
+//via fmt.Sprintf().
+func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.HavePrefixMatcher{
+		Prefix: prefix,
+		Args:   args,
+	}
+}
+
+//HaveSuffix succeeds if actual is a string or stringer that contains the
+//passed-in string as a suffix. Optional arguments can be provided to construct
+//via fmt.Sprintf().
+func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.HaveSuffixMatcher{
+		Suffix: suffix,
+		Args:   args,
+	}
+}
+
+//MatchJSON succeeds if actual is a string or stringer of JSON that matches
+//the expected JSON. The JSONs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchJSON(json interface{}) types.GomegaMatcher {
+	return &matchers.MatchJSONMatcher{
+		JSONToMatch: json,
+	}
+}
+
+//MatchXML succeeds if actual is a string or stringer of XML that matches
+//the expected XML. The XMLs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like whitespaces shouldn't matter.
+func MatchXML(xml interface{}) types.GomegaMatcher {
+	return &matchers.MatchXMLMatcher{
+		XMLToMatch: xml,
+	}
+}
+
+//MatchYAML succeeds if actual is a string or stringer of YAML that matches
+//the expected YAML. The YAMLs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
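//For example (an illustrative sketch):
//	Expect("a: 1\nb: two").Should(MatchYAML("b: two\na: 1"))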
+func MatchYAML(yaml interface{}) types.GomegaMatcher {
+	return &matchers.MatchYAMLMatcher{
+		YAMLToMatch: yaml,
+	}
+}
+
+//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
+func BeEmpty() types.GomegaMatcher {
+	return &matchers.BeEmptyMatcher{}
+}
+
+//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
+func HaveLen(count int) types.GomegaMatcher {
+	return &matchers.HaveLenMatcher{
+		Count: count,
+	}
+}
+
+//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
+func HaveCap(count int) types.GomegaMatcher {
+	return &matchers.HaveCapMatcher{
+		Count: count,
+	}
+}
+
+//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+func BeZero() types.GomegaMatcher {
+	return &matchers.BeZeroMatcher{}
+}
+
+//ContainElement succeeds if actual contains the passed in element.
+//By default ContainElement() uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+//Actual must be an array, slice or map.
+//For maps, ContainElement searches through the map's values.
+func ContainElement(element interface{}) types.GomegaMatcher {
+	return &matchers.ContainElementMatcher{
+		Element: element,
+	}
+}
+
+//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+//
+//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you may do so,
+//provided that it is the only element passed in to ConsistOf:
+//
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+	return &matchers.ConsistOfMatcher{
+		Elements: elements,
+	}
+}
+
+//HaveKey succeeds if actual is a map with the passed in key.
+//By default HaveKey uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+func HaveKey(key interface{}) types.GomegaMatcher {
+	return &matchers.HaveKeyMatcher{
+		Key: key,
+	}
+}
+
+//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+//By default HaveKeyWithValue uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) +func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { + return &matchers.HaveKeyWithValueMatcher{ + Key: key, + Value: value, + } +} + +//BeNumerically performs numerical assertions in a type-agnostic way. +//Actual and expected should be numbers, though the specific type of +//number is irrelevant (float32, float64, uint8, etc...). +// +//There are six, self-explanatory, supported comparators: +// Expect(1.0).Should(BeNumerically("==", 1)) +// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01)) +// Expect(1.0).Should(BeNumerically(">", 0.9)) +// Expect(1.0).Should(BeNumerically(">=", 1.0)) +// Expect(1.0).Should(BeNumerically("<", 3)) +// Expect(1.0).Should(BeNumerically("<=", 1.0)) +func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { + return &matchers.BeNumericallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + } +} + +//BeTemporally compares time.Time's like BeNumerically +//Actual and expected must be time.Time. The comparators are the same as for BeNumerically +// Expect(time.Now()).Should(BeTemporally(">", time.Time{})) +// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) +func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher { + return &matchers.BeTemporallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + Threshold: threshold, + } +} + +//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. +//It will return an error when one of the values is nil. +// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values +// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type +// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type +// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) +func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { + return &matchers.AssignableToTypeOfMatcher{ + Expected: expected, + } +} + +//Panic succeeds if actual is a function that, when invoked, panics. +//Actual must be a function that takes no arguments and returns no results. +func Panic() types.GomegaMatcher { + return &matchers.PanicMatcher{} +} + +//BeAnExistingFile succeeds if a file exists. +//Actual must be a string representing the abs path to the file being checked. +func BeAnExistingFile() types.GomegaMatcher { + return &matchers.BeAnExistingFileMatcher{} +} + +//BeARegularFile succeeds if a file exists and is a regular file. +//Actual must be a string representing the abs path to the file being checked. +func BeARegularFile() types.GomegaMatcher { + return &matchers.BeARegularFileMatcher{} +} + +//BeADirectory succeeds if a file exists and is a directory. +//Actual must be a string representing the abs path to the file being checked. +func BeADirectory() types.GomegaMatcher { + return &matchers.BeADirectoryMatcher{} +} + +//And succeeds only if all of the given matchers succeed. +//The matchers are tried in order, and will fail-fast if one doesn't succeed. 
+// Expect("hi").To(And(HaveLen(2), Equal("hi")) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func And(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.AndMatcher{Matchers: ms} +} + +//SatisfyAll is an alias for And(). +// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi"))) +func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return And(matchers...) +} + +//Or succeeds if any of the given matchers succeed. +//The matchers are tried in order and will return immediately upon the first successful match. +// Expect("hi").To(Or(HaveLen(3), HaveLen(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Or(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.OrMatcher{Matchers: ms} +} + +//SatisfyAny is an alias for Or(). +// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2)) +func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return Or(matchers...) +} + +//Not negates the given matcher; it succeeds if the given matcher fails. +// Expect(1).To(Not(Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Not(matcher types.GomegaMatcher) types.GomegaMatcher { + return &matchers.NotMatcher{Matcher: matcher} +} + +//WithTransform applies the `transform` to the actual value and matches it against `matcher`. +//The given transform must be a function of one parameter that returns one value. +// var plus1 = func(i int) int { return i + 1 } +// Expect(1).To(WithTransform(plus1, Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { + return matchers.NewWithTransformMatcher(transform, matcher) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go new file mode 100644 index 000000000..d83a29164 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -0,0 +1,63 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AndMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstFailedMatcher types.GomegaMatcher +} + +func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { + m.firstFailedMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if !success || err != nil { + m.firstFailedMatcher = matcher + return false, err + } + } + return true, nil +} + +func (m *AndMatcher) FailureMessage(actual interface{}) (message string) { + return m.firstFailedMatcher.FailureMessage(actual) +} + +func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers)) +} + +func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: T, F, => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become T + + Match eval: T, T, T => T + So match is currently T, what should MatchMayChangeInTheFuture() return? 
+ Seems to depend on ANY of them being able to change to F. + */ + + if m.firstFailedMatcher == nil { + // so all matchers succeeded. Any one of them changing would change the result. + for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of them were going to change + } + // one of the matchers failed. It must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and_test.go b/vendor/github.com/onsi/gomega/matchers/and_test.go new file mode 100644 index 000000000..acf778cd6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and_test.go @@ -0,0 +1,103 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +// sample data +var ( + // example input + input = "hi" + // some matchers that succeed against the input + true1 = HaveLen(2) + true2 = Equal("hi") + true3 = MatchRegexp("hi") + // some matchers that fail against the input. + false1 = HaveLen(1) + false2 = Equal("hip") + false3 = MatchRegexp("hope") +) + +// verifyFailureMessage expects the matcher to fail with the given input, and verifies the failure message. +func verifyFailureMessage(m types.GomegaMatcher, input string, expectedFailureMsgFragment string) { + Expect(m.Match(input)).To(BeFalse()) + Expect(m.FailureMessage(input)).To(Equal( + "Expected\n    <string>: " + input + "\n" + expectedFailureMsgFragment)) +} + +var _ = Describe("AndMatcher", func() { + It("works with positive cases", func() { + Expect(input).To(And()) + Expect(input).To(And(true1)) + Expect(input).To(And(true1, true2)) + Expect(input).To(And(true1, true2, true3)) + + // use alias + Expect(input).To(SatisfyAll(true1, true2, true3)) + }) + + It("works with negative cases", func() { + Expect(input).ToNot(And(false1, false2)) + Expect(input).ToNot(And(true1, true2, false3)) + Expect(input).ToNot(And(true1, false2, false3)) + Expect(input).ToNot(And(false1, true1, true2)) + }) + + Context("failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(And(false1, true1), input, "to have length 1") + verifyFailureMessage(And(true1, false2), input, "to equal\n    <string>: hip") + verifyFailureMessage(And(true1, true2, false3), input, "to match regular expression\n    <string>: hope") + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(And(true1, true2)), input, + `To not satisfy all of these matchers: [%!s(*matchers.HaveLenMatcher=&{2}) %!s(*matchers.EqualMatcher=&{hi})]`) + }) + }) + }) + + Context("MatchMayChangeInTheFuture", func() { + Context("Match returned false", func() { + Context("returns value of the failed matcher", func() { + It("false if failed matcher not going to change", func() { + // 3 matchers: 1st returns true, 2nd returns false and is not going to change, 3rd is never called + m := And(Not(BeNil()), Or(), Equal(1)) + Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // empty Or() indicates not going to change + }) + It("true if failed matcher indicates it might change", func() { + // 3 matchers: 1st returns true, 2nd returns false and "might" change, 3rd is never called + m := And(Not(BeNil()), Equal(5), Equal(1)) +
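// Editorial note (not in upstream Gomega): Equal(5) does not implement the OracleMatcher interface, so oraclematcher.MatchMayChangeInTheFuture falls back to returning true for it - which is exactly what the next assertion relies on. +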
Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal(5) indicates it might change + }) + }) + }) + Context("Match returned true", func() { + It("returns true if any of the matchers could change", func() { + // 3 matchers, all return true, and all could change + m := And(Not(BeNil()), Equal("hi"), HaveLen(2)) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true' + }) + It("returns false if none of the matchers could change", func() { + // empty And() has the property of always matching, and never can change since there are no sub-matchers that could change + m := And() + Expect(m.Match("anything")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse()) + + // And() with 3 sub-matchers that return true, and can't change + m = And(And(), And(), And()) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty And()'s won't change + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go new file mode 100644 index 000000000..51f8be6ae --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type AssignableToTypeOfMatcher struct { + Expected interface{} +} + +func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } else if matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare type to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } else if actual == nil { + return false, nil + } + + actualType := reflect.TypeOf(actual) + expectedType := reflect.TypeOf(matcher.Expected) + + return actualType.AssignableTo(expectedType), nil +} + +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) +} + +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go new file mode 100644 index 000000000..471a46d97 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go @@ -0,0 +1,46 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("AssignableToTypeOf", func() { + Context("When asserting assignability between types", func() { + It("should do the right thing", func() { + Expect(0).Should(BeAssignableToTypeOf(0)) + Expect(5).Should(BeAssignableToTypeOf(-1)) + Expect("foo").Should(BeAssignableToTypeOf("bar")) + Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) + + Expect(0).ShouldNot(BeAssignableToTypeOf("bar")) + Expect(5).ShouldNot(BeAssignableToTypeOf(struct{ Foo string }{})) + Expect("foo").ShouldNot(BeAssignableToTypeOf(42)) + }) + }) + + Context("When asserting nil values", func() { + It("should error", func() { + success, err := (&AssignableToTypeOfMatcher{Expected: nil}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + + Context("When actual is nil and expected is not nil", func() { + It("should return false without error", func() { + success, err := (&AssignableToTypeOfMatcher{Expected: 17}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).ShouldNot(HaveOccurred()) + }) + }) + + Context("When actual is not nil and expected is nil", func() { + It("should error", func() { + success, err := (&AssignableToTypeOfMatcher{Expected: nil}).Match(17) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/attributes_slice.go b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go new file mode 100644 index 000000000..355b362f4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go @@ -0,0 +1,14 @@ +package matchers + +import ( + "encoding/xml" + "strings" +) + +type attributesSlice []xml.Attr + +func (attrs attributesSlice) Len() int { return len(attrs) } +func (attrs attributesSlice) Less(i, j int) bool { + return strings.Compare(attrs[i].Name.Local, attrs[j].Name.Local) == -1 +} +func (attrs attributesSlice) Swap(i, j int) { attrs[i], attrs[j] = attrs[j], attrs[i] } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go new file mode 100644 index 000000000..7b6975e41 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notADirectoryError struct { + os.FileInfo +} + +func (t notADirectoryError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.Mode().IsRegular(): + return "file is a regular file" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeADirectoryMatcher struct { + expected interface{} + err error +} + +func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsDir() { + matcher.err = notADirectoryError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) +} + +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a 
directory")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go new file mode 100644 index 000000000..bc8742763 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeADirectoryMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Expect("/dne/test").ShouldNot(BeADirectory()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Expect(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Expect(tmpFile.Name()).ShouldNot(BeADirectory()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Expect(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Expect(tmpDir).Should(BeADirectory()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeADirectoryMatcher{}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeADirectoryMatcher{}).Match(true) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go new file mode 100644 index 000000000..e239131fb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notARegularFileError struct { + os.FileInfo +} + +func (t notARegularFileError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.IsDir(): + return "file is a directory" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeARegularFileMatcher struct { + expected interface{} + err error +} + +func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsRegular() { + matcher.err = notARegularFileError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) +} + +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a regular file")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go new file mode 100644 index 000000000..eae06a03e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeARegularFileMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Expect("/dne/test").ShouldNot(BeARegularFile()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Expect(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Expect(tmpFile.Name()).Should(BeARegularFile()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Expect(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Expect(tmpDir).ShouldNot(BeARegularFile()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeARegularFileMatcher{}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeARegularFileMatcher{}).Match(true) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go new file mode 100644 index 000000000..d42eba223 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type BeAnExistingFileMatcher struct { + expected interface{} +} + +func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") + } + + if _, err = os.Stat(actualFilename); err != nil { + switch { + case os.IsNotExist(err): + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to exist")) +} + +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to exist")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go new file mode 100644 index 000000000..e28bd0d65 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeAnExistingFileMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Expect("/dne/test").ShouldNot(BeAnExistingFile()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Expect(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Expect(tmpFile.Name()).Should(BeAnExistingFile()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Expect(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Expect(tmpDir).Should(BeAnExistingFile()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeAnExistingFileMatcher{}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeAnExistingFileMatcher{}).Match(true) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go new file mode 100644 index 000000000..ed6f69288 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -0,0 +1,46 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeClosedMatcher struct { +} + +func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. Got:\n%s", format.Object(actual, 1)) + } + + winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + if winnerIndex == 0 { + closed = !open + } else if winnerIndex == 1 { + closed = false + } + + return closed, nil +} + +func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be closed") +} + +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be open") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go new file mode 100644 index 000000000..c2e49ab50 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go @@ -0,0 +1,70 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeClosedMatcher", func() { + Context("when passed a channel", func() { + It("should do the right thing", func() { + openChannel := make(chan bool) + Expect(openChannel).ShouldNot(BeClosed()) + + var openReaderChannel <-chan bool + openReaderChannel = openChannel + Expect(openReaderChannel).ShouldNot(BeClosed()) + + closedChannel := make(chan bool) + close(closedChannel) + + Expect(closedChannel).Should(BeClosed()) + + var closedReaderChannel <-chan bool + closedReaderChannel = closedChannel + Expect(closedReaderChannel).Should(BeClosed()) + }) + }) + + Context("when passed a send-only channel", func() { + It("should error", func() { + openChannel := make(chan bool) + var openWriterChannel chan<- bool + openWriterChannel = openChannel + + success, err := (&BeClosedMatcher{}).Match(openWriterChannel) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + closedChannel := make(chan bool) + close(closedChannel) + + var closedWriterChannel chan<- bool + closedWriterChannel = closedChannel + + success, err = (&BeClosedMatcher{}).Match(closedWriterChannel) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + var nilChannel chan bool + + success, err := (&BeClosedMatcher{}).Match(nilChannel) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeClosedMatcher{}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeClosedMatcher{}).Match(7) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go new file mode 100644 index 000000000..8b00311b0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeEmptyMatcher struct { +} + +func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == 0, nil +} + +func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be empty") +} + +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be empty") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go new file mode 100644 index 000000000..132480cfc --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go @@ -0,0 +1,52 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeEmpty", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Expect("").Should(BeEmpty()) + Expect(" ").ShouldNot(BeEmpty()) + + Expect([0]int{}).Should(BeEmpty()) + Expect([1]int{1}).ShouldNot(BeEmpty()) + + Expect([]int{}).Should(BeEmpty()) + Expect([]int{1}).ShouldNot(BeEmpty()) + + Expect(map[string]int{}).Should(BeEmpty()) + Expect(map[string]int{"a": 1}).ShouldNot(BeEmpty()) + + c := make(chan bool, 1) + Expect(c).Should(BeEmpty()) + c <- true + Expect(c).ShouldNot(BeEmpty()) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should be true", func() { + var nilSlice []int + Expect(nilSlice).Should(BeEmpty()) + + var nilMap map[int]string + Expect(nilMap).Should(BeEmpty()) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&BeEmptyMatcher{}).Match(0) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeEmptyMatcher{}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go new file mode 100644 index 000000000..97ab20a4e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -0,0 +1,34 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeEquivalentToMatcher struct { + Expected interface{} +} + +func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Both actual and expected must not be nil.") + } + + convertedActual := actual + + if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) { + convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface() + } + + return reflect.DeepEqual(convertedActual, matcher.Expected), nil +} + +func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be equivalent to", matcher.Expected) +} + +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be equivalent to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go new file mode 100644 index 000000000..4d9d11d2d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeEquivalentTo", func() { + Context("when asserting that nil is equivalent to nil", func() { + It("should error", func() { + success, err := (&BeEquivalentToMatcher{Expected: nil}).Match(nil) + + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("When asserting on nil", func() { + It("should do the right thing", func() { + Expect("foo").ShouldNot(BeEquivalentTo(nil)) + Expect(nil).ShouldNot(BeEquivalentTo(3)) + Expect([]int{1, 2}).ShouldNot(BeEquivalentTo(nil)) + }) + }) + + Context("When asserting on type aliases", func() { + It("should the right thing", func() { + Expect(StringAlias("foo")).Should(BeEquivalentTo("foo")) + Expect("foo").Should(BeEquivalentTo(StringAlias("foo"))) + Expect(StringAlias("foo")).ShouldNot(BeEquivalentTo("bar")) + Expect("foo").ShouldNot(BeEquivalentTo(StringAlias("bar"))) + }) + }) + + Context("When asserting on numbers", func() { + It("should convert actual to expected and do the right thing", func() { + Expect(5).Should(BeEquivalentTo(5)) + Expect(5.0).Should(BeEquivalentTo(5.0)) + Expect(5).Should(BeEquivalentTo(5.0)) + + Expect(5).ShouldNot(BeEquivalentTo("5")) + Expect(5).ShouldNot(BeEquivalentTo(3)) + + //Here be dragons! + Expect(5.1).Should(BeEquivalentTo(5)) + Expect(5).ShouldNot(BeEquivalentTo(5.1)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go new file mode 100644 index 000000000..91d3b779e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeFalseMatcher struct { +} + +func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) + } + + return actual == false, nil +} + +func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be false") +} + +func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be false") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go new file mode 100644 index 000000000..25e70633d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go @@ -0,0 +1,20 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeFalse", func() { + It("should handle true and false correctly", func() { + Expect(true).ShouldNot(BeFalse()) + Expect(false).Should(BeFalse()) + }) + + It("should only support booleans", func() { + success, err := (&BeFalseMatcher{}).Match("foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go new file mode 100644 index 000000000..fdcda4d1f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go @@ -0,0 +1,37 @@ +package matchers + +import ( + "fmt" + "runtime" + + "github.com/onsi/gomega/format" +) + +type BeIdenticalToMatcher struct { + Expected interface{} +} + +func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + success = false + matchErr = nil + } + } + }() + + return actual == matcher.Expected, nil +} + +func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, "to be identical to", matcher.Expected) +} + +func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, "not to be identical to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go new file mode 100644 index 000000000..7fdd56eed --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go @@ -0,0 +1,61 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeIdenticalTo", func() { + Context("when asserting that nil equals nil", func() { + It("should error", func() { + success, err := (&BeIdenticalToMatcher{Expected: nil}).Match(nil) + + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + It("should treat the same pointer to a struct as identical", func() { + mySpecialStruct := myCustomType{} + Expect(&mySpecialStruct).Should(BeIdenticalTo(&mySpecialStruct)) + Expect(&myCustomType{}).ShouldNot(BeIdenticalTo(&mySpecialStruct)) + }) + + It("should be strict about types", func() { + Expect(5).ShouldNot(BeIdenticalTo("5")) + Expect(5).ShouldNot(BeIdenticalTo(5.0)) + Expect(5).ShouldNot(BeIdenticalTo(3)) + }) + + It("should treat primtives as identical", func() { + Expect("5").Should(BeIdenticalTo("5")) + Expect("5").ShouldNot(BeIdenticalTo("55")) + + Expect(5.55).Should(BeIdenticalTo(5.55)) + Expect(5.55).ShouldNot(BeIdenticalTo(6.66)) + + Expect(5).Should(BeIdenticalTo(5)) + Expect(5).ShouldNot(BeIdenticalTo(55)) + }) + + It("should treat the same pointers to a slice as identical", func() { + mySlice := []int{1, 2} + Expect(&mySlice).Should(BeIdenticalTo(&mySlice)) + Expect(&mySlice).ShouldNot(BeIdenticalTo(&[]int{1, 2})) + }) + + It("should treat the same pointers to a map as identical", func() { + myMap := map[string]string{"a": "b", "c": "d"} + Expect(&myMap).Should(BeIdenticalTo(&myMap)) + Expect(myMap).ShouldNot(BeIdenticalTo(map[string]string{"a": "b", "c": "d"})) + }) + + It("should treat the same pointers to an error as identical", func() { + myError := errors.New("foo") + Expect(&myError).Should(BeIdenticalTo(&myError)) + Expect(errors.New("foo")).ShouldNot(BeIdenticalTo(errors.New("bar"))) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go new file mode 100644 index 000000000..7ee84fe1b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -0,0 +1,18 @@ +package matchers + +import "github.com/onsi/gomega/format" + +type BeNilMatcher struct { +} + +func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { + return isNil(actual), nil +} + +func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be nil") +} + +func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be nil") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go new file mode 100644 index 000000000..c35aa3d7c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go @@ -0,0 +1,28 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("BeNil", func() { + It("should succeed when passed nil", func() { + Expect(nil).Should(BeNil()) + }) + + It("should succeed when passed a typed nil", func() { + var a []int + Expect(a).Should(BeNil()) + }) + + It("should succeed when passing nil pointer", func() { + var f *struct{} + Expect(f).Should(BeNil()) + }) + + It("should not succeed when not passed nil", func() { + Expect(0).ShouldNot(BeNil()) + Expect(false).ShouldNot(BeNil()) + Expect("").ShouldNot(BeNil()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go new file mode 100644 index 000000000..0c157f61b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -0,0 +1,120 @@ +package matchers + +import ( + "fmt" + "math" + + "github.com/onsi/gomega/format" +) + +type BeNumericallyMatcher struct { + Comparator string + CompareTo []interface{} +} + +func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { + if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { + return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) + } + if !isNumber(actual) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1)) + } + if !isNumber(matcher.CompareTo[0]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { + return false, fmt.Errorf("Expected a number. 
Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + if isFloat(actual) || isFloat(matcher.CompareTo[0]) { + var secondOperand float64 = 1e-8 + if len(matcher.CompareTo) == 2 { + secondOperand = toFloat(matcher.CompareTo[1]) + } + success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand) + } else if isInteger(actual) { + var secondOperand int64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toInteger(matcher.CompareTo[1]) + } + success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand) + } else if isUnsignedInteger(actual) { + var secondOperand uint64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toUnsignedInteger(matcher.CompareTo[1]) + } + success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand) + } else { + return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1)) + } + + return success, nil +} + +func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) { + switch matcher.Comparator { + case "==", "~": + diff := actual - compareTo + return -threshold <= diff && diff <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) { + switch matcher.Comparator { + case "==", "~": + if actual < compareTo { + actual, compareTo = compareTo, actual + } + return actual-compareTo <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) { + switch matcher.Comparator { + case "~": + return math.Abs(actual-compareTo) <= threshold + case "==": + return (actual == compareTo) + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go new file mode 100644 index 000000000..bd385b7f5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go @@ -0,0 +1,148 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeNumerically", func() { + Context("when passed a number", func() { + It("should support ==", func() { + Expect(uint32(5)).Should(BeNumerically("==", 5)) + Expect(float64(5.0)).Should(BeNumerically("==", 5)) + Expect(int8(5)).Should(BeNumerically("==", 5)) + }) + + It("should not have false positives", func() { + Expect(5.1).ShouldNot(BeNumerically("==", 5)) + Expect(5).ShouldNot(BeNumerically("==", 5.1)) + }) + + It("should support >", func() { + Expect(uint32(5)).Should(BeNumerically(">", 4)) + Expect(float64(5.0)).Should(BeNumerically(">", 4.9)) + Expect(int8(5)).Should(BeNumerically(">", 4)) + + Expect(uint32(5)).ShouldNot(BeNumerically(">", 5)) + Expect(float64(5.0)).ShouldNot(BeNumerically(">", 5.0)) + Expect(int8(5)).ShouldNot(BeNumerically(">", 5)) + }) + + It("should support <", func() { + Expect(uint32(5)).Should(BeNumerically("<", 6)) + Expect(float64(5.0)).Should(BeNumerically("<", 5.1)) + Expect(int8(5)).Should(BeNumerically("<", 6)) + + Expect(uint32(5)).ShouldNot(BeNumerically("<", 5)) + Expect(float64(5.0)).ShouldNot(BeNumerically("<", 5.0)) + Expect(int8(5)).ShouldNot(BeNumerically("<", 5)) + }) + + It("should support >=", func() { + Expect(uint32(5)).Should(BeNumerically(">=", 4)) + Expect(float64(5.0)).Should(BeNumerically(">=", 4.9)) + Expect(int8(5)).Should(BeNumerically(">=", 4)) + + Expect(uint32(5)).Should(BeNumerically(">=", 5)) + Expect(float64(5.0)).Should(BeNumerically(">=", 5.0)) + Expect(int8(5)).Should(BeNumerically(">=", 5)) + + Expect(uint32(5)).ShouldNot(BeNumerically(">=", 6)) + Expect(float64(5.0)).ShouldNot(BeNumerically(">=", 5.1)) + Expect(int8(5)).ShouldNot(BeNumerically(">=", 6)) + }) + + It("should support <=", func() { + Expect(uint32(5)).Should(BeNumerically("<=", 6)) + Expect(float64(5.0)).Should(BeNumerically("<=", 5.1)) + Expect(int8(5)).Should(BeNumerically("<=", 6)) + + Expect(uint32(5)).Should(BeNumerically("<=", 5)) + Expect(float64(5.0)).Should(BeNumerically("<=", 5.0)) + Expect(int8(5)).Should(BeNumerically("<=", 5)) + + Expect(uint32(5)).ShouldNot(BeNumerically("<=", 4)) + Expect(float64(5.0)).ShouldNot(BeNumerically("<=", 4.9)) + Expect(int8(5)).Should(BeNumerically("<=", 5)) + }) + + Context("when passed ~", func() { + Context("when passed a float", func() { + Context("and there is no precision parameter", func() { + It("should default to 1e-8", func() { + Expect(5.00000001).Should(BeNumerically("~", 5.00000002)) + Expect(5.00000001).ShouldNot(BeNumerically("~", 5.0000001)) + }) + }) + + Context("and there is a precision parameter", func() { + It("should use the precision parameter", func() { + Expect(5.1).Should(BeNumerically("~", 5.19, 0.1)) + Expect(5.1).Should(BeNumerically("~", 5.01, 0.1)) + Expect(5.1).ShouldNot(BeNumerically("~", 5.22, 0.1)) + Expect(5.1).ShouldNot(BeNumerically("~", 4.98, 0.1)) + }) + }) + }) + + Context("when passed an int/uint", func() { + Context("and there is no precision parameter", func() { + It("should just do strict equality", func() { + Expect(5).Should(BeNumerically("~", 5)) + Expect(5).ShouldNot(BeNumerically("~", 6)) + Expect(uint(5)).ShouldNot(BeNumerically("~", 6)) + }) + }) + + Context("and there is a precision parameter", func() { + It("should use precision paramter", func() { + Expect(5).Should(BeNumerically("~", 6, 2)) + Expect(5).ShouldNot(BeNumerically("~", 8, 2)) + Expect(uint(5)).Should(BeNumerically("~", 6, 1)) + }) + }) + }) + }) + }) + + Context("when passed a non-number", func() { + It("should error", func() { + success, err := 
(&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{5}}).Match("foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "=="}).Match(5) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "~", CompareTo: []interface{}{3.0, "foo"}}).Match(5.0) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match(5) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match("foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{nil}}).Match(0) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{0}}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed an unsupported comparator", func() { + It("should error", func() { + success, err := (&BeNumericallyMatcher{Comparator: "!=", CompareTo: []interface{}{5}}).Match(4) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go new file mode 100644 index 000000000..d7c32233e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -0,0 +1,71 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeSentMatcher struct { + Arg interface{} + channelClosed bool +} + +func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.RecvDir { + return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + argType := reflect.TypeOf(matcher.Arg) + assignable := argType.AssignableTo(channelType.Elem()) + + if !assignable { + return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1)) + } + + argValue := reflect.ValueOf(matcher.Arg) + + defer func() { + if e := recover(); e != nil { + success = false + err = fmt.Errorf("Cannot send to a closed channel") + matcher.channelClosed = true + } + }() + + winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var didSend bool + if winnerIndex == 0 { + didSend = true + } + + return didSend, nil +} + +func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go new file mode 100644 index 000000000..68ec72879 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go @@ -0,0 +1,107 @@ +package matchers_test + +import ( + "time" + + . "github.com/onsi/gomega/matchers" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("BeSent", func() { + Context("when passed a channel and a matching type", func() { + Context("when the channel is ready to receive", func() { + It("should succeed and send the value down the channel", func() { + c := make(chan string) + d := make(chan string) + go func() { + val := <-c + d <- val + }() + + time.Sleep(10 * time.Millisecond) + + Expect(c).Should(BeSent("foo")) + Eventually(d).Should(Receive(Equal("foo"))) + }) + + It("should succeed (with a buffered channel)", func() { + c := make(chan string, 1) + Expect(c).Should(BeSent("foo")) + Expect(<-c).Should(Equal("foo")) + }) + }) + + Context("when the channel is not ready to receive", func() { + It("should fail and not send down the channel", func() { + c := make(chan string) + Expect(c).ShouldNot(BeSent("foo")) + Consistently(c).ShouldNot(Receive()) + }) + }) + + Context("when the channel is eventually ready to receive", func() { + It("should succeed", func() { + c := make(chan string) + d := make(chan string) + go func() { + time.Sleep(30 * time.Millisecond) + val := <-c + d <- val + }() + + Eventually(c).Should(BeSent("foo")) + Eventually(d).Should(Receive(Equal("foo"))) + }) + }) + + Context("when the channel is closed", func() { + It("should error", func() { + c := make(chan string) + close(c) + success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + + It("should short-circuit Eventually", func() { + c := make(chan string) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(c, 10.0).Should(BeSent("foo")) + }) + Expect(failures).Should(HaveLen(1)) + Expect(time.Since(t)).Should(BeNumerically("<", time.Second)) + }) + }) + }) + + Context("when passed a channel and a non-matching type", func() { + 
It("should error", func() { + success, err := (&BeSentMatcher{Arg: "foo"}).Match(make(chan int, 1)) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a receive-only channel", func() { + It("should error", func() { + var c <-chan string + c = make(chan string, 1) + success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a nonchannel", func() { + It("should error", func() { + success, err := (&BeSentMatcher{Arg: "foo"}).Match("bar") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go new file mode 100644 index 000000000..cb7c038ef --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -0,0 +1,66 @@ +package matchers + +import ( + "fmt" + "time" + + "github.com/onsi/gomega/format" +) + +type BeTemporallyMatcher struct { + Comparator string + CompareTo time.Time + Threshold []time.Duration +} + +func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { + // predicate to test for time.Time type + isTime := func(t interface{}) bool { + _, ok := t.(time.Time) + return ok + } + + if !isTime(actual) { + return false, fmt.Errorf("Expected a time.Time. Got:\n%s", format.Object(actual, 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + var threshold = time.Millisecond + if len(matcher.Threshold) == 1 { + threshold = matcher.Threshold[0] + } + + return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil +} + +func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) { + switch matcher.Comparator { + case "==": + return actual.Equal(compareTo) + case "~": + diff := actual.Sub(compareTo) + return -threshold <= diff && diff <= threshold + case ">": + return actual.After(compareTo) + case ">=": + return !actual.Before(compareTo) + case "<": + return actual.Before(compareTo) + case "<=": + return !actual.After(compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go new file mode 100644 index 000000000..95a3a103e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go @@ -0,0 +1,99 @@ +package matchers_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeTemporally", func() { + + var t0, t1, t2 time.Time + BeforeEach(func() { + t0 = time.Now() + t1 = t0.Add(time.Second) + t2 = t0.Add(-time.Second) + }) + + Context("When comparing times", func() { + + It("should support ==", func() { + Expect(t0).Should(BeTemporally("==", t0)) + Expect(t1).ShouldNot(BeTemporally("==", t0)) + Expect(t0).ShouldNot(BeTemporally("==", t1)) + Expect(t0).ShouldNot(BeTemporally("==", time.Time{})) + }) + + It("should support >", func() { + Expect(t0).Should(BeTemporally(">", t2)) + Expect(t0).ShouldNot(BeTemporally(">", t0)) + Expect(t2).ShouldNot(BeTemporally(">", t0)) + }) + + It("should support <", func() { + Expect(t0).Should(BeTemporally("<", t1)) + Expect(t0).ShouldNot(BeTemporally("<", t0)) + Expect(t1).ShouldNot(BeTemporally("<", t0)) + }) + + It("should support >=", func() { + Expect(t0).Should(BeTemporally(">=", t2)) + Expect(t0).Should(BeTemporally(">=", t0)) + Expect(t0).ShouldNot(BeTemporally(">=", t1)) + }) + + It("should support <=", func() { + Expect(t0).Should(BeTemporally("<=", t1)) + Expect(t0).Should(BeTemporally("<=", t0)) + Expect(t0).ShouldNot(BeTemporally("<=", t2)) + }) + + Context("when passed ~", func() { + Context("and there is no precision parameter", func() { + BeforeEach(func() { + t1 = t0.Add(time.Millisecond / 2) + t2 = t0.Add(-2 * time.Millisecond) + }) + It("should approximate", func() { + Expect(t0).Should(BeTemporally("~", t0)) + Expect(t0).Should(BeTemporally("~", t1)) + Expect(t0).ShouldNot(BeTemporally("~", t2)) + }) + }) + + Context("and there is a precision parameter", func() { + BeforeEach(func() { + t2 = t0.Add(3 * time.Second) + }) + It("should use precision paramter", func() { + d := 2 * time.Second + Expect(t0).Should(BeTemporally("~", t0, d)) + Expect(t0).Should(BeTemporally("~", t1, d)) + Expect(t0).ShouldNot(BeTemporally("~", t2, d)) + }) + }) + }) + }) + + Context("when passed a non-time", func() { + It("should error", func() { + success, err := (&BeTemporallyMatcher{Comparator: "==", CompareTo: t0}).Match("foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeTemporallyMatcher{Comparator: "=="}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed an unsupported comparator", func() { + It("should error", func() { + success, err := (&BeTemporallyMatcher{Comparator: "!=", CompareTo: t0}).Match(t2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go new file mode 100644 index 000000000..ec57c5db4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeTrueMatcher struct { +} + +func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) + } + + return actual.(bool), nil +} + +func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be true") +} + +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be true") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go new file mode 100644 index 000000000..9eda62c33 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go @@ -0,0 +1,20 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeTrue", func() { + It("should handle true and false correctly", func() { + Expect(true).Should(BeTrue()) + Expect(false).ShouldNot(BeTrue()) + }) + + It("should only support booleans", func() { + success, err := (&BeTrueMatcher{}).Match("foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go new file mode 100644 index 000000000..26196f168 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeZeroMatcher struct { +} + +func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return true, nil + } + zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() + + return reflect.DeepEqual(zeroValue, actual), nil + +} + +func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be zero-valued") +} + +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be zero-valued") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go new file mode 100644 index 000000000..015d87d1c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go @@ -0,0 +1,30 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("BeZero", func() { + It("should succeed if the passed in object is the zero value for its type", func() { + Expect(nil).Should(BeZero()) + + Expect("").Should(BeZero()) + Expect(" ").ShouldNot(BeZero()) + + Expect(0).Should(BeZero()) + Expect(1).ShouldNot(BeZero()) + + Expect(0.0).Should(BeZero()) + Expect(0.1).ShouldNot(BeZero()) + + // Expect([]int{}).Should(BeZero()) + Expect([]int{1}).ShouldNot(BeZero()) + + // Expect(map[string]int{}).Should(BeZero()) + Expect(map[string]int{"a": 1}).ShouldNot(BeZero()) + + Expect(myCustomType{}).Should(BeZero()) + Expect(myCustomType{s: "a"}).ShouldNot(BeZero()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go new file mode 100644 index 000000000..7b0e08868 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -0,0 +1,80 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" +) + +type ConsistOfMatcher struct { + Elements []interface{} +} + +func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + } + + elements := matcher.Elements + if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { + elements = []interface{}{} + value := reflect.ValueOf(matcher.Elements[0]) + for i := 0; i < value.Len(); i++ { + elements = append(elements, value.Index(i).Interface()) + } + } + + matchers := []interface{}{} + for _, element := range elements { + matcher, isMatcher := element.(omegaMatcher) + if !isMatcher { + matcher = &EqualMatcher{Expected: element} + } + matchers = append(matchers, matcher) + } + + values := matcher.valuesOf(actual) + + if len(values) != len(matchers) { + return false, nil + } + + neighbours := func(v, m interface{}) (bool, error) { + match, err := m.(omegaMatcher).Match(v) + return match && err == nil, nil + } + + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) + if err != nil { + return false, err + } + + return len(bipartiteGraph.LargestMatching()) == len(values), nil +} + +func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { + value := reflect.ValueOf(actual) + values := []interface{}{} + if isMap(actual) { + keys := value.MapKeys() + for i := 0; i < value.Len(); i++ { + values = append(values, value.MapIndex(keys[i]).Interface()) + } + } else { + for i := 0; i < value.Len(); i++ { + values = append(values, value.Index(i).Interface()) + } + } + + return values +} + +func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to consist of", matcher.Elements) +} + +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to consist of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of_test.go b/vendor/github.com/onsi/gomega/matchers/consist_of_test.go new file mode 100644 index 000000000..081830f5c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of_test.go @@ -0,0 +1,75 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ConsistOf", func() { + Context("with a slice", func() { + It("should do the right thing", func() { + Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + }) + + Context("with an array", func() { + It("should do the right thing", func() { + Expect([3]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Expect([3]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Expect([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Expect([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + }) + + Context("with a map", func() { + It("should apply to the values", func() { + Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Expect(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + + }) + + Context("with anything else", func() { + It("should error", func() { + failures := InterceptGomegaFailures(func() { + Expect("foo").Should(ConsistOf("f", "o", "o")) + }) + + Expect(failures).Should(HaveLen(1)) + }) + }) + + Context("when passed matchers", func() { + It("should pass if the matchers pass", func() { + Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), "baz")) + Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"))) + Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("foo"))) + Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("^ba"))) + Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("turducken"))) + }) + + It("should not depend on the order of the matchers", func() { + Expect([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(1), ContainElement(2))) + Expect([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(2), ContainElement(1))) + }) + + Context("when a matcher errors", func() { + It("should soldier on", func() { + Expect([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf(BeFalse(), "foo", "bar")) + Expect([]interface{}{"foo", "bar", false}).Should(ConsistOf(BeFalse(), ContainSubstring("foo"), "bar")) + }) + }) + }) + + Context("when passed exactly one argument, and that argument is a slice", func() { + It("should match against the elements of that argument", func() { + Expect([]string{"foo", "bar", "baz"}).Should(ConsistOf([]string{"foo", "bar", "baz"})) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go new file mode 100644 index 000000000..4159335d0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -0,0 +1,56 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ContainElementMatcher struct { + Element interface{} +} + +func 
(matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + } + + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) + if !elementIsMatcher { + elemMatcher = &EqualMatcher{Expected: matcher.Element} + } + + value := reflect.ValueOf(actual) + var keys []reflect.Value + if isMap(actual) { + keys = value.MapKeys() + } + var lastError error + for i := 0; i < value.Len(); i++ { + var success bool + var err error + if isMap(actual) { + success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface()) + } else { + success, err = elemMatcher.Match(value.Index(i).Interface()) + } + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain element matching", matcher.Element) +} + +func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain element matching", matcher.Element) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go new file mode 100644 index 000000000..60fb55e96 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go @@ -0,0 +1,76 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("ContainElement", func() { + Context("when passed a supported type", func() { + Context("and expecting a non-matcher", func() { + It("should do the right thing", func() { + Expect([2]int{1, 2}).Should(ContainElement(2)) + Expect([2]int{1, 2}).ShouldNot(ContainElement(3)) + + Expect([]int{1, 2}).Should(ContainElement(2)) + Expect([]int{1, 2}).ShouldNot(ContainElement(3)) + + Expect(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(2)) + Expect(map[int]int{3: 1, 4: 2}).ShouldNot(ContainElement(3)) + + arr := make([]myCustomType, 2) + arr[0] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}} + arr[1] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "c"}} + Expect(arr).Should(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Expect(arr).ShouldNot(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"b", "c"}})) + }) + }) + + Context("and expecting a matcher", func() { + It("should pass each element through the matcher", func() { + Expect([]int{1, 2, 3}).Should(ContainElement(BeNumerically(">=", 3))) + Expect([]int{1, 2, 3}).ShouldNot(ContainElement(BeNumerically(">", 3))) + Expect(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(BeNumerically(">=", 2))) + Expect(map[string]int{"foo": 1, "bar": 2}).ShouldNot(ContainElement(BeNumerically(">", 2))) + }) + + It("should power through even if the matcher ever fails", func() { + Expect([]interface{}{1, 2, "3", 4}).Should(ContainElement(BeNumerically(">=", 3))) + }) + + It("should fail if the matcher fails", func() { + actual := []interface{}{1, 2, "3", "4"} + success, err := (&ContainElementMatcher{Element: BeNumerically(">=", 3)}).Match(actual) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + }) + + Context("when passed 
a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Expect(nilSlice).ShouldNot(ContainElement(1)) + + var nilMap map[int]string + Expect(nilMap).ShouldNot(ContainElement("foo")) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&ContainElementMatcher{Element: 0}).Match(0) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&ContainElementMatcher{Element: 0}).Match("abc") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&ContainElementMatcher{Element: 0}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go new file mode 100644 index 000000000..f8dc41e74 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/format" +) + +type ContainSubstringMatcher struct { + Substr string + Args []interface{} +} + +func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + + return strings.Contains(actualString, matcher.stringToMatch()), nil +} + +func (matcher *ContainSubstringMatcher) stringToMatch() string { + stringToMatch := matcher.Substr + if len(matcher.Args) > 0 { + stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...) + } + return stringToMatch +} + +func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain substring", matcher.stringToMatch()) +} + +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain substring", matcher.stringToMatch()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go new file mode 100644 index 000000000..efffb4732 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("ContainSubstringMatcher", func() { + Context("when actual is a string", func() { + It("should match against the string", func() { + Expect("Marvelous").Should(ContainSubstring("rve")) + Expect("Marvelous").ShouldNot(ContainSubstring("boo")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Expect("Marvelous3").Should(ContainSubstring("velous%d", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match agains the returned string", func() { + Expect(&myStringer{a: "Abc3"}).Should(ContainSubstring("bc3")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&ContainSubstringMatcher{Substr: "2"}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go new file mode 100644 index 000000000..befb7bdfd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type EqualMatcher struct { + Expected interface{} +} + +func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + // Shortcut for byte slices. + // Comparing long byte slices with reflect.DeepEqual is very slow, + // so use bytes.Equal if actual and expected are both byte slices. + if actualByteSlice, ok := actual.([]byte); ok { + if expectedByteSlice, ok := matcher.Expected.([]byte); ok { + return bytes.Equal(actualByteSlice, expectedByteSlice), nil + } + } + return reflect.DeepEqual(actual, matcher.Expected), nil +} + +func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { + actualString, actualOK := actual.(string) + expectedString, expectedOK := matcher.Expected.(string) + if actualOK && expectedOK { + return format.MessageWithDiff(actualString, "to equal", expectedString) + } + + return format.Message(actual, "to equal", matcher.Expected) +} + +func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go new file mode 100644 index 000000000..3ab991e4f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go @@ -0,0 +1,80 @@ +package matchers_test + +import ( + "errors" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("Equal", func() { + Context("when asserting that nil equals nil", func() { + It("should error", func() { + success, err := (&EqualMatcher{Expected: nil}).Match(nil) + + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("When asserting equality between objects", func() { + It("should do the right thing", func() { + Expect(5).Should(Equal(5)) + Expect(5.0).Should(Equal(5.0)) + + Expect(5).ShouldNot(Equal("5")) + Expect(5).ShouldNot(Equal(5.0)) + Expect(5).ShouldNot(Equal(3)) + + Expect("5").Should(Equal("5")) + Expect([]int{1, 2}).Should(Equal([]int{1, 2})) + Expect([]int{1, 2}).ShouldNot(Equal([]int{2, 1})) + Expect([]byte{'f', 'o', 'o'}).Should(Equal([]byte{'f', 'o', 'o'})) + Expect([]byte{'f', 'o', 'o'}).ShouldNot(Equal([]byte{'b', 'a', 'r'})) + Expect(map[string]string{"a": "b", "c": "d"}).Should(Equal(map[string]string{"a": "b", "c": "d"})) + Expect(map[string]string{"a": "b", "c": "d"}).ShouldNot(Equal(map[string]string{"a": "b", "c": "e"})) + Expect(errors.New("foo")).Should(Equal(errors.New("foo"))) + Expect(errors.New("foo")).ShouldNot(Equal(errors.New("bar"))) + + Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).Should(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "bar", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 2, f: 2.0, arr: []string{"a", "b"}})) + Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 3.0, arr: []string{"a", "b"}})) + Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b", "c"}})) + }) + }) + + Describe("failure messages", func() { + It("shows the two strings simply when they are short", func() { + subject := EqualMatcher{Expected: "eric"} + + failureMessage := subject.FailureMessage("tim") + Expect(failureMessage).To(BeEquivalentTo(expectedShortStringFailureMessage)) + }) + + It("shows the exact point where two long strings differ", func() { + stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + subject := EqualMatcher{Expected: stringWithZ} + + failureMessage := subject.FailureMessage(stringWithB) + Expect(failureMessage).To(BeEquivalentTo(expectedLongStringFailureMessage)) + }) + }) +}) + +var expectedShortStringFailureMessage = strings.TrimSpace(` +Expected + : tim +to equal + : eric +`) +var expectedLongStringFailureMessage = strings.TrimSpace(` +Expected + : "...aaaaabaaaaa..." +to equal | + : "...aaaaazaaaaa..." 
+`) diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go new file mode 100644 index 000000000..7ace93dc3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveCapMatcher struct { + Count int +} + +func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := capOf(actual) + if !ok { + return false, fmt.Errorf("HaveCap matcher expects an array/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go new file mode 100644 index 000000000..8a61f2e2c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveCap", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Expect([0]int{}).Should(HaveCap(0)) + Expect([2]int{1}).Should(HaveCap(2)) + + Expect([]int{}).Should(HaveCap(0)) + Expect([]int{1, 2, 3, 4, 5}[:2]).Should(HaveCap(5)) + Expect(make([]int, 0, 5)).Should(HaveCap(5)) + + c := make(chan bool, 3) + Expect(c).Should(HaveCap(3)) + c <- true + c <- true + Expect(c).Should(HaveCap(3)) + + Expect(make(chan bool)).Should(HaveCap(0)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate successfully on the passed in value", func() { + var nilSlice []int + Expect(nilSlice).Should(HaveCap(0)) + + var nilChan chan int + Expect(nilChan).Should(HaveCap(0)) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&HaveCapMatcher{Count: 0}).Match(0) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&HaveCapMatcher{Count: 0}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go new file mode 100644 index 000000000..ea5b92336 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type HaveKeyMatcher struct { + Key interface{} +} + +func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map. 
Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + return true, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "to have key matching", matcher.Key) + default: + return format.Message(actual, "to have key", matcher.Key) + } +} + +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "not to have key matching", matcher.Key) + default: + return format.Message(actual, "not to have key", matcher.Key) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go new file mode 100644 index 000000000..0f1561b7d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go @@ -0,0 +1,73 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveKey", func() { + var ( + stringKeys map[string]int + intKeys map[int]string + objKeys map[*myCustomType]string + + customA *myCustomType + customB *myCustomType + ) + BeforeEach(func() { + stringKeys = map[string]int{"foo": 2, "bar": 3} + intKeys = map[int]string{2: "foo", 3: "bar"} + + customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} + customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} + objKeys = map[*myCustomType]string{customA: "aardvark", customB: "kangaroo"} + }) + + Context("when passed a map", func() { + It("should do the right thing", func() { + Expect(stringKeys).Should(HaveKey("foo")) + Expect(stringKeys).ShouldNot(HaveKey("baz")) + + Expect(intKeys).Should(HaveKey(2)) + Expect(intKeys).ShouldNot(HaveKey(4)) + + Expect(objKeys).Should(HaveKey(customA)) + Expect(objKeys).Should(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}})) + Expect(objKeys).ShouldNot(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}})) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilMap map[int]string + Expect(nilMap).ShouldNot(HaveKey("foo")) + }) + }) + + Context("when the passed in key is actually a matcher", func() { + It("should pass each element through the matcher", func() { + Expect(stringKeys).Should(HaveKey(ContainSubstring("oo"))) + Expect(stringKeys).ShouldNot(HaveKey(ContainSubstring("foobar"))) + }) + + It("should fail if the matcher ever fails", func() { + actual := map[int]string{1: "a", 3: "b", 2: "c"} + success, err := (&HaveKeyMatcher{Key: ContainSubstring("ar")}).Match(actual) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed something that is not a map", func() { + It("should error", func() { + success, err := (&HaveKeyMatcher{Key: "foo"}).Match([]string{"foo"}) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + 
success, err = (&HaveKeyMatcher{Key: "foo"}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go new file mode 100644 index 000000000..06355b1e9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -0,0 +1,74 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type HaveKeyWithValueMatcher struct { + Key interface{} + Value interface{} +} + +func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher) + if !valueIsMatcher { + valueMatcher = &EqualMatcher{Expected: matcher.Value} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + actualValue := reflect.ValueOf(actual).MapIndex(keys[i]) + success, err := valueMatcher.Match(actualValue.Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + } + return success, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { + str := "to have {key: value}" + if _, ok := matcher.Key.(omegaMatcher); ok { + str += " matching" + } else if _, ok := matcher.Value.(omegaMatcher); ok { + str += " matching" + } + + expect := make(map[interface{}]interface{}, 1) + expect[matcher.Key] = matcher.Value + return format.Message(actual, str, expect) +} + +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + kStr := "not to have key" + if _, ok := matcher.Key.(omegaMatcher); ok { + kStr = "not to have key matching" + } + + vStr := "or that key's value not be" + if _, ok := matcher.Value.(omegaMatcher); ok { + vStr = "or to have that key's value not matching" + } + + return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go new file mode 100644 index 000000000..0a49ec993 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go @@ -0,0 +1,82 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveKeyWithValue", func() { + var ( + stringKeys map[string]int + intKeys map[int]string + objKeys map[*myCustomType]*myCustomType + + customA *myCustomType + customB *myCustomType + ) + BeforeEach(func() { + stringKeys = map[string]int{"foo": 2, "bar": 3} + intKeys = map[int]string{2: "foo", 3: "bar"} + + customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} + customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} + objKeys = map[*myCustomType]*myCustomType{customA: customA, customB: customA} + }) + + Context("when passed a map", func() { + It("should do the right thing", func() { + Expect(stringKeys).Should(HaveKeyWithValue("foo", 2)) + Expect(stringKeys).ShouldNot(HaveKeyWithValue("foo", 1)) + Expect(stringKeys).ShouldNot(HaveKeyWithValue("baz", 2)) + Expect(stringKeys).ShouldNot(HaveKeyWithValue("baz", 1)) + + Expect(intKeys).Should(HaveKeyWithValue(2, "foo")) + Expect(intKeys).ShouldNot(HaveKeyWithValue(4, "foo")) + Expect(intKeys).ShouldNot(HaveKeyWithValue(2, "baz")) + + Expect(objKeys).Should(HaveKeyWithValue(customA, customA)) + Expect(objKeys).Should(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}, &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}})) + Expect(objKeys).ShouldNot(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}}, customA)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilMap map[int]string + Expect(nilMap).ShouldNot(HaveKeyWithValue("foo", "bar")) + }) + }) + + Context("when the passed in key or value is actually a matcher", func() { + It("should pass each element through the matcher", func() { + Expect(stringKeys).Should(HaveKeyWithValue(ContainSubstring("oo"), 2)) + Expect(intKeys).Should(HaveKeyWithValue(2, ContainSubstring("oo"))) + Expect(stringKeys).ShouldNot(HaveKeyWithValue(ContainSubstring("foobar"), 2)) + }) + + It("should fail if the matcher ever fails", func() { + actual := map[int]string{1: "a", 3: "b", 2: "c"} + success, err := (&HaveKeyWithValueMatcher{Key: ContainSubstring("ar"), Value: 2}).Match(actual) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + otherActual := map[string]int{"a": 1, "b": 2, "c": 3} + success, err = (&HaveKeyWithValueMatcher{Key: "a", Value: ContainSubstring("1")}).Match(otherActual) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed something that is not a map", func() { + It("should error", func() { + success, err := (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match([]string{"foo"}) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go new file mode 100644 index 000000000..ee4276189 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveLenMatcher struct { + Count int +} + +func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, 
fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go new file mode 100644 index 000000000..c60f63886 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go @@ -0,0 +1,53 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveLen", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Expect("").Should(HaveLen(0)) + Expect("AA").Should(HaveLen(2)) + + Expect([0]int{}).Should(HaveLen(0)) + Expect([2]int{1, 2}).Should(HaveLen(2)) + + Expect([]int{}).Should(HaveLen(0)) + Expect([]int{1, 2, 3}).Should(HaveLen(3)) + + Expect(map[string]int{}).Should(HaveLen(0)) + Expect(map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}).Should(HaveLen(4)) + + c := make(chan bool, 3) + Expect(c).Should(HaveLen(0)) + c <- true + c <- true + Expect(c).Should(HaveLen(2)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Expect(nilSlice).Should(HaveLen(0)) + + var nilMap map[int]string + Expect(nilMap).Should(HaveLen(0)) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&HaveLenMatcher{Count: 0}).Match(0) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&HaveLenMatcher{Count: 0}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go new file mode 100644 index 000000000..ebdd71786 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveOccurredMatcher struct { +} + +func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return false, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1)) + } + + // must be non-nil (or a pointer to a non-nil) + return !isNil(actual), nil +} + +func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected an error to have occurred. 
Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred") +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go new file mode 100644 index 000000000..0ad632ec1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go @@ -0,0 +1,59 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +type CustomErr struct { + msg string +} + +func (e *CustomErr) Error() string { + return e.msg +} + +var _ = Describe("HaveOccurred", func() { + It("should succeed if matching an error", func() { + Expect(errors.New("Foo")).Should(HaveOccurred()) + }) + + It("should not succeed with nil", func() { + Expect(nil).ShouldNot(HaveOccurred()) + }) + + It("should only support errors and nil", func() { + success, err := (&HaveOccurredMatcher{}).Match("foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&HaveOccurredMatcher{}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + + It("doesn't support non-error type", func() { + success, err := (&HaveOccurredMatcher{}).Match(AnyType{}) + Expect(success).Should(BeFalse()) + Expect(err).Should(MatchError("Expected an error-type. Got:\n : {}")) + }) + + It("doesn't support non-error pointer type", func() { + success, err := (&HaveOccurredMatcher{}).Match(&AnyType{}) + Expect(success).Should(BeFalse()) + Expect(err).Should(MatchError(MatchRegexp(`Expected an error-type. Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`))) + }) + + It("should succeed with pointer types that conform to error interface", func() { + err := &CustomErr{"ohai"} + Expect(err).Should(HaveOccurred()) + }) + + It("should not succeed with nil pointers to types that conform to error interface", func() { + var err *CustomErr = nil + Expect(err).ShouldNot(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go new file mode 100644 index 000000000..1d8e80270 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -0,0 +1,36 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HavePrefixMatcher struct { + Prefix string + Args []interface{} +} + +func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + prefix := matcher.prefix() + return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil +} + +func (matcher *HavePrefixMatcher) prefix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Prefix, matcher.Args...) 
+ } + return matcher.Prefix +} + +func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have prefix", matcher.prefix()) +} + +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have prefix", matcher.prefix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go new file mode 100644 index 000000000..fe29b7b5d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HavePrefixMatcher", func() { + Context("when actual is a string", func() { + It("should match a string prefix", func() { + Expect("Ab").Should(HavePrefix("A")) + Expect("A").ShouldNot(HavePrefix("Ab")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Expect("C3PO").Should(HavePrefix("C%dP", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match against the returned string", func() { + Expect(&myStringer{a: "Ab"}).Should(HavePrefix("A")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&HavePrefixMatcher{Prefix: "2"}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go new file mode 100644 index 000000000..40a3526eb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -0,0 +1,36 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveSuffixMatcher struct { + Suffix string + Args []interface{} +} + +func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + suffix := matcher.suffix() + return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil +} + +func (matcher *HaveSuffixMatcher) suffix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Suffix, matcher.Args...) + } + return matcher.Suffix +} + +func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have suffix", matcher.suffix()) +} + +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have suffix", matcher.suffix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go new file mode 100644 index 000000000..2ae29821a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveSuffixMatcher", func() { + Context("when actual is a string", func() { + It("should match a string suffix", func() { + Expect("Ab").Should(HaveSuffix("b")) + Expect("A").ShouldNot(HaveSuffix("Ab")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Expect("C3PO").Should(HaveSuffix("%dPO", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match against the returned string", func() { + Expect(&myStringer{a: "Ab"}).Should(HaveSuffix("b")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&HaveSuffixMatcher{Suffix: "2"}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go new file mode 100644 index 000000000..07499ac95 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -0,0 +1,51 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type MatchErrorMatcher struct { + Expected interface{} +} + +func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { + if isNil(actual) { + return false, fmt.Errorf("Expected an error, got nil") + } + + if !isError(actual) { + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) + } + + actualErr := actual.(error) + + if isError(matcher.Expected) { + return reflect.DeepEqual(actualErr, matcher.Expected), nil + } + + if isString(matcher.Expected) { + return actualErr.Error() == matcher.Expected, nil + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + if matcher.Expected != nil { + subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher) + if hasSubMatcher { + return subMatcher.Match(actualErr.Error()) + } + } + + return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match error", matcher.Expected) +} + +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match error", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go new file mode 100644 index 000000000..9bf89fc46 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go @@ -0,0 +1,107 @@ +package matchers_test + +import ( + "errors" + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +type CustomError struct { +} + +func (c CustomError) Error() string { + return "an error" +} + +var _ = Describe("MatchErrorMatcher", func() { + Context("When asserting against an error", func() { + It("should succeed when matching with an error", func() { + err := errors.New("an error") + fmtErr := fmt.Errorf("an error") + customErr := CustomError{} + + Expect(err).Should(MatchError(errors.New("an error"))) + Expect(err).ShouldNot(MatchError(errors.New("another error"))) + + Expect(fmtErr).Should(MatchError(errors.New("an error"))) + Expect(customErr).Should(MatchError(CustomError{})) + }) + + It("should succeed when matching with a string", func() { + err := errors.New("an error") + fmtErr := fmt.Errorf("an error") + customErr := CustomError{} + + Expect(err).Should(MatchError("an error")) + Expect(err).ShouldNot(MatchError("another error")) + + Expect(fmtErr).Should(MatchError("an error")) + Expect(customErr).Should(MatchError("an error")) + }) + + Context("when passed a matcher", func() { + It("should pass if the matcher passes against the error string", func() { + err := errors.New("error 123 abc") + + Expect(err).Should(MatchError(MatchRegexp(`\d{3}`))) + }) + + It("should fail if the matcher fails against the error string", func() { + err := errors.New("no digits") + Expect(err).ShouldNot(MatchError(MatchRegexp(`\d`))) + }) + }) + + It("should fail when passed anything else", func() { + actualErr := errors.New("an error") + _, err := (&MatchErrorMatcher{ + Expected: []byte("an error"), + }).Match(actualErr) + Expect(err).Should(HaveOccurred()) + + _, err = (&MatchErrorMatcher{ + Expected: 3, + }).Match(actualErr) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed nil", func() { + It("should fail", func() { + _, err := (&MatchErrorMatcher{ + Expected: "an error", + }).Match(nil) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a non-error", func() { + It("should fail", func() { + _, err := (&MatchErrorMatcher{ + Expected: "an error", + }).Match("an error") + Expect(err).Should(HaveOccurred()) + + _, err = (&MatchErrorMatcher{ + Expected: "an error", + }).Match(3) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed an error that is also a string", func() { + It("should use it as an error", func() { + var e mockErr = "mockErr" + + // this fails if the matcher casts e to a string before comparison + Expect(e).Should(MatchError(e)) + }) + }) +}) + +type mockErr string + +func (m mockErr) Error() string { return string(m) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go new file mode 100644 index 000000000..499bb5830 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -0,0 +1,135 @@ +package matchers + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" + + "github.com/onsi/gomega/format" +) + +type MatchJSONMatcher struct { + JSONToMatch interface{} + firstFailurePath []interface{} +} + +func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.prettyPrint(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + // this is guarded by prettyPrint + json.Unmarshal([]byte(actualString), &aval) + json.Unmarshal([]byte(expectedString), &eval) + var equal bool + equal, matcher.firstFailurePath = deepEqual(aval, eval) + return equal, 
nil +} + +func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath) +} + +func formattedMessage(comparisonMessage string, failurePath []interface{}) string { + var diffMessage string + if len(failurePath) == 0 { + diffMessage = "" + } else { + diffMessage = fmt.Sprintf("\n\nfirst mismatched key: %s", formattedFailurePath(failurePath)) + } + return fmt.Sprintf("%s%s", comparisonMessage, diffMessage) +} + +func formattedFailurePath(failurePath []interface{}) string { + formattedPaths := []string{} + for i := len(failurePath) - 1; i >= 0; i-- { + switch p := failurePath[i].(type) { + case int: + formattedPaths = append(formattedPaths, fmt.Sprintf(`[%d]`, p)) + default: + if i != len(failurePath)-1 { + formattedPaths = append(formattedPaths, ".") + } + formattedPaths = append(formattedPaths, fmt.Sprintf(`"%s"`, p)) + } + } + return strings.Join(formattedPaths, "") +} + +func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.JSONToMatch) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n%s", format.Object(matcher.JSONToMatch, 1)) + } + + abuf := new(bytes.Buffer) + ebuf := new(bytes.Buffer) + + if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil { + return "", "", fmt.Errorf("Actual '%s' should be valid JSON, but it is not.\nUnderlying error:%s", actualString, err) + } + + if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil { + return "", "", fmt.Errorf("Expected '%s' should be valid JSON, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return abuf.String(), ebuf.String(), nil +} + +func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { + var errorPath []interface{} + if reflect.TypeOf(a) != reflect.TypeOf(b) { + return false, errorPath + } + + switch a.(type) { + case []interface{}: + if len(a.([]interface{})) != len(b.([]interface{})) { + return false, errorPath + } + + for i, v := range a.([]interface{}) { + elementEqual, keyPath := deepEqual(v, b.([]interface{})[i]) + if !elementEqual { + return false, append(keyPath, i) + } + } + return true, errorPath + + case map[string]interface{}: + if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) { + return false, errorPath + } + + for k, v1 := range a.(map[string]interface{}) { + v2, ok := b.(map[string]interface{})[k] + if !ok { + return false, errorPath + } + elementEqual, keyPath := deepEqual(v1, v2) + if !elementEqual { + return false, append(keyPath, k) + } + } + return true, errorPath + + default: + return a == b, errorPath + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go new file mode 100644 index 000000000..690c83f9a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go @@ -0,0 +1,97 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchJSONMatcher", func() { + Context("When passed stringifiables", func() { + It("should succeed if the JSON matches", func() { + Expect("{}").Should(MatchJSON("{}")) + Expect(`{"a":1}`).Should(MatchJSON(`{"a":1}`)) + Expect(`{ + "a":1 + }`).Should(MatchJSON(`{"a":1}`)) + Expect(`{"a":1, "b":2}`).Should(MatchJSON(`{"b":2, "a":1}`)) + Expect(`{"a":1}`).ShouldNot(MatchJSON(`{"b":2, "a":1}`)) + + Expect(`{"a":"a", "b":"b"}`).ShouldNot(MatchJSON(`{"a":"a", "b":"b", "c":"c"}`)) + Expect(`{"a":"a", "b":"b", "c":"c"}`).ShouldNot(MatchJSON(`{"a":"a", "b":"b"}`)) + + Expect(`{"a":null, "b":null}`).ShouldNot(MatchJSON(`{"c":"c", "d":"d"}`)) + Expect(`{"a":null, "b":null, "c":null}`).ShouldNot(MatchJSON(`{"a":null, "b":null, "d":null}`)) + }) + + It("should work with byte arrays", func() { + Expect([]byte("{}")).Should(MatchJSON([]byte("{}"))) + Expect("{}").Should(MatchJSON([]byte("{}"))) + Expect([]byte("{}")).Should(MatchJSON("{}")) + }) + }) + + Context("when a key mismatch is found", func() { + It("reports the first found mismatch", func() { + subject := MatchJSONMatcher{JSONToMatch: `5`} + actual := `7` + subject.Match(actual) + + failureMessage := subject.FailureMessage(`7`) + Expect(failureMessage).ToNot(ContainSubstring("first mismatched key")) + + subject = MatchJSONMatcher{JSONToMatch: `{"a": 1, "b.g": {"c": 2, "1": ["hello", "see ya"]}}`} + actual = `{"a": 1, "b.g": {"c": 2, "1": ["hello", "goodbye"]}}` + subject.Match(actual) + + failureMessage = subject.FailureMessage(actual) + Expect(failureMessage).To(ContainSubstring(`first mismatched key: "b.g"."1"[1]`)) + }) + }) + + Context("when the expected is not valid JSON", func() { + It("should error and explain why", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: `{}`}).Match(`oops`) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Actual 'oops' should be valid JSON")) + }) + }) + + Context("when the actual is not valid JSON", func() { + It("should error and explain why", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: `oops`}).Match(`{}`) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Expected 'oops' should be valid JSON")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: 2}).Match("{}") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n : 2")) + + success, err = (&MatchJSONMatcher{JSONToMatch: nil}).Match("{}") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. 
Got actual:\n : 2")) + + success, err = (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n : nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go new file mode 100644 index 000000000..adac5db6b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -0,0 +1,43 @@ +package matchers + +import ( + "fmt" + "regexp" + + "github.com/onsi/gomega/format" +) + +type MatchRegexpMatcher struct { + Regexp string + Args []interface{} +} + +func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) + } + + match, err := regexp.Match(matcher.regexp(), []byte(actualString)) + if err != nil { + return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error()) + } + + return match, nil +} + +func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) regexp() string { + re := matcher.Regexp + if len(matcher.Args) > 0 { + re = fmt.Sprintf(matcher.Regexp, matcher.Args...) + } + return re +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go new file mode 100644 index 000000000..ac2538bb4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go @@ -0,0 +1,44 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchRegexp", func() { + Context("when actual is a string", func() { + It("should match against the string", func() { + Expect(" a2!bla").Should(MatchRegexp(`\d!`)) + Expect(" a2!bla").ShouldNot(MatchRegexp(`[A-Z]`)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match agains the returned string", func() { + Expect(&myStringer{a: "Abc3"}).Should(MatchRegexp(`[A-Z][a-z]+\d`)) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Expect(" a23!bla").Should(MatchRegexp(`\d%d!`, 3)) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&MatchRegexpMatcher{Regexp: `\d`}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when the passed in regexp fails to compile", func() { + It("should error", func() { + success, err := (&MatchRegexpMatcher{Regexp: "("}).Match("Foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go new file mode 100644 index 000000000..3b412ce81 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -0,0 +1,134 @@ +package matchers + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + + "github.com/onsi/gomega/format" + "golang.org/x/net/html/charset" +) + +type MatchXMLMatcher struct { + XMLToMatch interface{} +} + +func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.formattedPrint(actual) + if err != nil { + return false, err + } + + aval, err := parseXmlContent(actualString) + if err != nil { + return false, fmt.Errorf("Actual '%s' should be valid XML, but it is not.\nUnderlying error:%s", actualString, err) + } + + eval, err := parseXmlContent(expectedString) + if err != nil { + return false, fmt.Errorf("Expected '%s' should be valid XML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) { + var ok bool + actualString, ok = toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok = toString(matcher.XMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n%s", format.Object(matcher.XMLToMatch, 1)) + } + return actualString, expectedString, nil +} + +func parseXmlContent(content string) (*xmlNode, error) { + allNodes := []*xmlNode{} + + dec := newXmlDecoder(strings.NewReader(content)) + for { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to decode next token: %v", err) + } + + lastNodeIndex := len(allNodes) - 1 + var lastNode *xmlNode + if len(allNodes) > 0 { + lastNode = allNodes[lastNodeIndex] + } else { + lastNode = &xmlNode{} + } + + switch tok := tok.(type) { + case xml.StartElement: + attrs := attributesSlice(tok.Attr) + sort.Sort(attrs) + allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr}) + case xml.EndElement: + if len(allNodes) > 1 { + allNodes[lastNodeIndex-1].Nodes = append(allNodes[lastNodeIndex-1].Nodes, lastNode) + allNodes = allNodes[:lastNodeIndex] + } + case xml.CharData: + lastNode.Content = append(lastNode.Content, tok.Copy()...) + case xml.Comment: + lastNode.Comments = append(lastNode.Comments, tok.Copy()) + case xml.ProcInst: + lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy()) + } + } + + if len(allNodes) == 0 { + return nil, errors.New("found no nodes") + } + firstNode := allNodes[0] + trimParentNodesContentSpaces(firstNode) + + return firstNode, nil +} + +func newXmlDecoder(reader io.Reader) *xml.Decoder { + dec := xml.NewDecoder(reader) + dec.CharsetReader = charset.NewReaderLabel + return dec +} + +func trimParentNodesContentSpaces(node *xmlNode) { + if len(node.Nodes) > 0 { + node.Content = bytes.TrimSpace(node.Content) + for _, childNode := range node.Nodes { + trimParentNodesContentSpaces(childNode) + } + } +} + +type xmlNode struct { + XMLName xml.Name + Comments []xml.Comment + ProcInsts []xml.ProcInst + XMLAttr []xml.Attr + Content []byte + Nodes []*xmlNode +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go new file mode 100644 index 000000000..0b559b22e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go @@ -0,0 +1,97 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchXMLMatcher", func() { + + var ( + sample_01 = readFileContents("test_data/xml/sample_01.xml") + sample_02 = readFileContents("test_data/xml/sample_02.xml") + sample_03 = readFileContents("test_data/xml/sample_03.xml") + sample_04 = readFileContents("test_data/xml/sample_04.xml") + sample_05 = readFileContents("test_data/xml/sample_05.xml") + sample_06 = readFileContents("test_data/xml/sample_06.xml") + sample_07 = readFileContents("test_data/xml/sample_07.xml") + sample_08 = readFileContents("test_data/xml/sample_08.xml") + sample_09 = readFileContents("test_data/xml/sample_09.xml") + sample_10 = readFileContents("test_data/xml/sample_10.xml") + sample_11 = readFileContents("test_data/xml/sample_11.xml") + ) + + Context("When passed stringifiables", func() { + It("matches documents regardless of the attribute order", func() { + a := `` + b := `` + Expect(b).Should(MatchXML(a)) + Expect(a).Should(MatchXML(b)) + }) + + It("should succeed if the XML matches", func() { + Expect(sample_01).Should(MatchXML(sample_01)) // same XML + Expect(sample_01).Should(MatchXML(sample_02)) // same XML with blank lines + Expect(sample_01).Should(MatchXML(sample_03)) // same XML with different formatting + Expect(sample_01).ShouldNot(MatchXML(sample_04)) // same structures with different values + Expect(sample_01).ShouldNot(MatchXML(sample_05)) // different structures + Expect(sample_06).ShouldNot(MatchXML(sample_07)) // same xml names with different namespaces + Expect(sample_07).ShouldNot(MatchXML(sample_08)) // same structures with different values + Expect(sample_09).ShouldNot(MatchXML(sample_10)) // same structures with different attribute values + Expect(sample_11).Should(MatchXML(sample_11)) // with non UTF-8 encoding + }) + + It("should work with byte arrays", func() { + Expect([]byte(sample_01)).Should(MatchXML([]byte(sample_01))) + Expect([]byte(sample_01)).Should(MatchXML(sample_01)) + Expect(sample_01).Should(MatchXML([]byte(sample_01))) + }) + }) + + Context("when the expected is not valid XML", func() { + It("should error and explain why", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(`oops`) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Actual 'oops' should be valid XML")) + }) + }) + + Context("when the actual is not valid XML", func() { + It("should error and explain why", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: `oops`}).Match(sample_01) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Expected 'oops' should be valid XML")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: 2}).Match(sample_01) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : 2")) + + success, err = (&MatchXMLMatcher{XMLToMatch: nil}).Match(sample_01) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n : 2")) + + success, err = (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n : nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go new file mode 100644 index 000000000..69fb51a85 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -0,0 +1,74 @@ +package matchers + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/gomega/format" + "gopkg.in/yaml.v2" +) + +type MatchYAMLMatcher struct { + YAMLToMatch interface{} +} + +func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil { + return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err) + } + if err := yaml.Unmarshal([]byte(expectedString), &eval); err != nil { + return false, fmt.Errorf("Expected '%s' should be valid YAML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return format.Message(actualString, "to match YAML of", expectedString) +} + +func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return format.Message(actualString, "not to match YAML of", expectedString) +} + +func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + return normalise(actualString), normalise(expectedString), err +} + +func normalise(input string) string { + var val interface{} + err := yaml.Unmarshal([]byte(input), &val) + if err != nil { + panic(err) // guarded by Match + } + output, err := yaml.Marshal(val) + if err != nil { + panic(err) // guarded by Unmarshal + } + return strings.TrimSpace(string(output)) +} + +func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.YAMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n%s", format.Object(matcher.YAMLToMatch, 1)) + } + + return actualString, expectedString, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go new file mode 100644 index 000000000..8e63de19e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go @@ -0,0 +1,94 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchYAMLMatcher", func() { + Context("When passed stringifiables", func() { + It("should succeed if the YAML matches", func() { + Expect("---").Should(MatchYAML("")) + Expect("a: 1").Should(MatchYAML(`{"a":1}`)) + Expect("a: 1\nb: 2").Should(MatchYAML(`{"b":2, "a":1}`)) + }) + + It("should explain if the YAML does not match when it should", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).FailureMessage("b: 2") + Expect(message).To(MatchRegexp(`Expected\s+: b: 2\s+to match YAML of\s+: a: 1`)) + }) + + It("should normalise the expected and actual when explaining if the YAML does not match when it should", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).FailureMessage("{b: two}") + Expect(message).To(MatchRegexp(`Expected\s+: b: two\s+to match YAML of\s+: a: one`)) + }) + + It("should explain if the YAML matches when it should not", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).NegatedFailureMessage("a: 1") + Expect(message).To(MatchRegexp(`Expected\s+: a: 1\s+not to match YAML of\s+: a: 1`)) + }) + + It("should normalise the expected and actual when explaining if the YAML matches when it should not", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).NegatedFailureMessage("{a: one}") + Expect(message).To(MatchRegexp(`Expected\s+: a: one\s+not to match YAML of\s+: a: one`)) + }) + + It("should fail if the YAML does not match", func() { + Expect("a: 1").ShouldNot(MatchYAML(`{"b":2, "a":1}`)) + }) + + It("should work with byte arrays", func() { + Expect([]byte("a: 1")).Should(MatchYAML([]byte("a: 1"))) + Expect("a: 1").Should(MatchYAML([]byte("a: 1"))) + Expect([]byte("a: 1")).Should(MatchYAML("a: 1")) + }) + }) + + Context("when the expected is not valid YAML", func() { + It("should error and explain why", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match("good:\nbad") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Actual 'good:\nbad' should be valid YAML")) + }) + }) + + Context("when the actual is not valid YAML", func() { + It("should error and explain why", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: "good:\nbad"}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Expected 'good:\nbad' should be valid YAML")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: 2}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n : 2")) + + success, err = (&MatchYAMLMatcher{YAMLToMatch: nil}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n : 2")) + + success, err = (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n : nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go b/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go new file mode 100644 index 000000000..b5f76c995 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type myStringer struct { + a string +} + +func (s *myStringer) String() string { + return s.a +} + +type StringAlias string + +type myCustomType struct { + s string + n int + f float32 + arr []string +} + +func Test(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gomega Matchers") +} + +func readFileContents(filePath string) []byte { + f := openFile(filePath) + b, err := ioutil.ReadAll(f) + if err != nil { + panic(fmt.Errorf("failed to read file contents: %v", err)) + } + return b +} + +func openFile(filePath string) *os.File { + f, err := os.Open(filePath) + if err != nil { + panic(fmt.Errorf("failed to open file: %v", err)) + } + return f +} diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go new file mode 100644 index 000000000..2c91670bd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type NotMatcher struct { + Matcher types.GomegaMatcher +} + +func (m *NotMatcher) Match(actual interface{}) (bool, error) { + success, err := m.Matcher.Match(actual) + if err != nil { + return false, err + } + return !success, nil +} + +func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.Matcher.FailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +} diff --git a/vendor/github.com/onsi/gomega/matchers/not_test.go b/vendor/github.com/onsi/gomega/matchers/not_test.go new file mode 100644 index 000000000..b3c1fdbf0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not_test.go @@ -0,0 +1,57 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("NotMatcher", func() { + Context("basic examples", func() { + It("works", func() { + Expect(input).To(Not(false1)) + Expect(input).To(Not(Not(true2))) + Expect(input).ToNot(Not(true3)) + Expect(input).ToNot(Not(Not(false1))) + Expect(input).To(Not(Not(Not(false2)))) + }) + }) + + Context("De Morgan's laws", func() { + It("~(A && B) == ~A || ~B", func() { + Expect(input).To(Not(And(false1, false2))) + Expect(input).To(Or(Not(false1), Not(false2))) + }) + It("~(A || B) == ~A && ~B", func() { + Expect(input).To(Not(Or(false1, false2))) + Expect(input).To(And(Not(false1), Not(false2))) + }) + }) + + Context("failure messages are opposite of original matchers' failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(HaveLen(2)), input, "not to have length 2") + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(Not(HaveLen(3))), input, "to have length 3") + }) + }) + }) + + Context("MatchMayChangeInTheFuture()", func() { + It("Propagates value from wrapped matcher", func() { + m := Not(Or()) // an empty Or() always returns false, and indicates it cannot change + Expect(m.Match("anything")).To(BeTrue()) + Expect(m.(*NotMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse()) + }) + It("Defaults to true", func() { + m := Not(Equal(1)) // Equal does not have this method + Expect(m.Match(2)).To(BeTrue()) + Expect(m.(*NotMatcher).MatchMayChangeInTheFuture(2)).To(BeTrue()) // defaults to true + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go new file mode 100644 index 000000000..3bf799800 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -0,0 +1,67 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type OrMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstSuccessfulMatcher types.GomegaMatcher +} + +func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { + m.firstSuccessfulMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if err != nil { + return false, err + } + if success { + m.firstSuccessfulMatcher = matcher + return true, nil + } + } + return false, nil +} + +func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) +} + +func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) +} + +func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: F, T, => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become F + + Match eval: F, F, F => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to T. + */ + + if m.firstSuccessfulMatcher != nil { + // one of the matchers succeeded.. 
it must be able to change in order to affect the result
+		return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual)
+	} else {
+		// so all matchers failed.. Any one of them changing would change the result.
+		for _, matcher := range m.Matchers {
+			if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+				return true
+			}
+		}
+		return false // none of them were going to change
+	}
+} diff --git a/vendor/github.com/onsi/gomega/matchers/or_test.go b/vendor/github.com/onsi/gomega/matchers/or_test.go new file mode 100644 index 000000000..9589a174d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or_test.go @@ -0,0 +1,85 @@ +package matchers_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/matchers"
+)
+
+var _ = Describe("OrMatcher", func() {
+	It("works with positive cases", func() {
+		Expect(input).To(Or(true1))
+		Expect(input).To(Or(true1, true2))
+		Expect(input).To(Or(true1, false1))
+		Expect(input).To(Or(false1, true2))
+		Expect(input).To(Or(true1, true2, true3))
+		Expect(input).To(Or(true1, true2, false3))
+		Expect(input).To(Or(true1, false2, true3))
+		Expect(input).To(Or(false1, true2, true3))
+		Expect(input).To(Or(true1, false2, false3))
+		Expect(input).To(Or(false1, false2, true3))
+
+		// use alias
+		Expect(input).To(SatisfyAny(false1, false2, true3))
+	})
+
+	It("works with negative cases", func() {
+		Expect(input).ToNot(Or())
+		Expect(input).ToNot(Or(false1))
+		Expect(input).ToNot(Or(false1, false2))
+		Expect(input).ToNot(Or(false1, false2, false3))
+	})
+
+	Context("failure messages", func() {
+		Context("when match fails", func() {
+			It("gives a descriptive message", func() {
+				verifyFailureMessage(Or(false1, false2), input,
+					"To satisfy at least one of these matchers: [%!s(*matchers.HaveLenMatcher=&{1}) %!s(*matchers.EqualMatcher=&{hip})]")
+			})
+		})
+
+		Context("when match succeeds, but expected it to fail", func() {
+			It("gives a descriptive message", func() {
+				verifyFailureMessage(Not(Or(true1, true2)), input, `not to have length 2`)
+			})
+		})
+	})
+
+	Context("MatchMayChangeInTheFuture", func() {
+		Context("Match returned false", func() {
+			It("returns true if any of the matchers could change", func() {
+				// 3 matchers, all return false, and all could change
+				m := Or(BeNil(), Equal("hip"), HaveLen(1))
+				Expect(m.Match("hi")).To(BeFalse())
+				Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true'
+			})
+			It("returns false if none of the matchers could change", func() {
+				// empty Or() has the property of never matching, and never can change since there are no sub-matchers that could change
+				m := Or()
+				Expect(m.Match("anything")).To(BeFalse())
+				Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse())
+
+				// Or() with 3 sub-matchers that return false, and can't change
+				m = Or(Or(), Or(), Or())
+				Expect(m.Match("hi")).To(BeFalse())
+				Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty Or()'s won't change
+			})
+		})
+		Context("Match returned true", func() {
+			Context("returns value of the successful matcher", func() {
+				It("false if successful matcher not going to change", func() {
+					// 3 matchers: 1st returns false, 2nd returns true and is not going to change, 3rd is never called
+					m := Or(BeNil(), And(), Equal(1))
+					Expect(m.Match("hi")).To(BeTrue())
+					Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse())
+				})
+				It("true if successful matcher indicates it might change", 
func() { + // 3 matchers: 1st returns false, 2nd returns true and "might" change, 3rd is never called + m := Or(Not(BeNil()), Equal("hi"), Equal(1)) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal("hi") indicates it might change + }) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go new file mode 100644 index 000000000..640f4db1a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -0,0 +1,46 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type PanicMatcher struct { + object interface{} +} + +func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") + } + + actualType := reflect.TypeOf(actual) + if actualType.Kind() != reflect.Func { + return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1)) + } + if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) { + return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. Got:\n%s", format.Object(actual, 1)) + } + + success = false + defer func() { + if e := recover(); e != nil { + matcher.object = e + success = true + } + }() + + reflect.ValueOf(actual).Call([]reflect.Value{}) + + return +} + +func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to panic") +} + +func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) +} diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go new file mode 100644 index 000000000..96270d5aa --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go @@ -0,0 +1,45 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("Panic", func() { + Context("when passed something that's not a function that takes zero arguments and returns nothing", func() { + It("should error", func() { + success, err := (&PanicMatcher{}).Match("foo") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(func(foo string) {}) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(func() string { return "bar" }) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a function of the correct type", func() { + It("should call the function and pass if the function panics", func() { + Expect(func() { panic("ack!") }).Should(Panic()) + Expect(func() {}).ShouldNot(Panic()) + }) + }) + + Context("when assertion fails", func() { + It("should print the object passed to Panic", func() { + failuresMessages := InterceptGomegaFailures(func() { + Expect(func() { panic("ack!") }).ShouldNot(Panic()) + }) + Expect(failuresMessages).Should(ConsistOf(MatchRegexp("not to panic, but panicked with\\s*: ack!"))) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go new file mode 100644 index 000000000..74e9e7ebe --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -0,0 +1,122 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ReceiveMatcher struct { + Arg interface{} + receivedValue reflect.Value + channelClosed bool +} + +func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + + if matcher.Arg != nil { + subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + if !hasSubMatcher { + argType := reflect.TypeOf(matcher.Arg) + if argType.Kind() != reflect.Ptr { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + + assignable := channelType.Elem().AssignableTo(argType.Elem()) + if !assignable { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + } + } + + winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + var didReceive bool + if winnerIndex == 0 { + closed = !open + didReceive = open + } + matcher.channelClosed = closed + + if closed { + return false, nil + } + + if hasSubMatcher { + if didReceive { + matcher.receivedValue = value + return subMatcher.Match(matcher.receivedValue.Interface()) + } + return false, nil + } + + if didReceive { + if matcher.Arg != nil { + outValue := reflect.ValueOf(matcher.Arg) + reflect.Indirect(outValue).Set(value) + } + + return true, nil + } + return false, nil +} + +func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.FailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } + return format.Message(actual, "to receive something."+closedAddendum) +} + +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } + return format.Message(actual, "not to receive anything."+closedAddendum) +} + +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go new file mode 100644 index 000000000..66330fb30 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go @@ -0,0 +1,280 @@ +package matchers_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +type kungFuActor interface { + DrunkenMaster() bool +} + +type jackie struct { + name string +} + +func (j *jackie) DrunkenMaster() bool { + return true +} + +var _ = Describe("ReceiveMatcher", func() { + Context("with no argument", func() { + Context("for a buffered channel", func() { + It("should succeed", func() { + channel := make(chan bool, 1) + + Expect(channel).ShouldNot(Receive()) + + channel <- true + + Expect(channel).Should(Receive()) + }) + }) + + Context("for an unbuffered channel", func() { + It("should succeed (eventually)", func() { + channel := make(chan bool) + + Expect(channel).ShouldNot(Receive()) + + go func() { + time.Sleep(10 * time.Millisecond) + channel <- true + }() + + Eventually(channel).Should(Receive()) + }) + }) + }) + + Context("with a pointer argument", func() { + Context("of the correct type", func() { + It("should write the value received on the channel to the pointer", func() { + channel := make(chan int, 1) + + var value int + + Expect(channel).ShouldNot(Receive(&value)) + Expect(value).Should(BeZero()) + + channel <- 17 + + Expect(channel).Should(Receive(&value)) + Expect(value).Should(Equal(17)) + }) + }) + + Context("to various types of objects", func() { + It("should work", func() { + //channels of strings + stringChan := make(chan string, 1) + stringChan <- "foo" + + var s string + Expect(stringChan).Should(Receive(&s)) + Expect(s).Should(Equal("foo")) + + //channels of slices + sliceChan := make(chan []bool, 1) + sliceChan <- []bool{true, true, false} + + var sl []bool + Expect(sliceChan).Should(Receive(&sl)) + Expect(sl).Should(Equal([]bool{true, true, false})) + + //channels of channels + chanChan := make(chan chan bool, 1) + c := make(chan bool) + chanChan <- c + + var receivedC chan bool + Expect(chanChan).Should(Receive(&receivedC)) + Expect(receivedC).Should(Equal(c)) + + //channels of interfaces + jackieChan := make(chan kungFuActor, 1) + aJackie := &jackie{name: "Jackie Chan"} + jackieChan <- aJackie + + var theJackie kungFuActor + Expect(jackieChan).Should(Receive(&theJackie)) + Expect(theJackie).Should(Equal(aJackie)) + }) + }) + + Context("of the wrong type", func() { + It("should error", func() { + channel := make(chan int) + var incorrectType bool + + success, err := (&ReceiveMatcher{Arg: &incorrectType}).Match(channel) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + var notAPointer int + success, err = (&ReceiveMatcher{Arg: notAPointer}).Match(channel) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) + }) + + Context("with a matcher", func() { + It("should defer to the underlying matcher", func() { + intChannel := make(chan int, 1) + intChannel <- 3 + Expect(intChannel).Should(Receive(Equal(3))) + + intChannel <- 2 + Expect(intChannel).ShouldNot(Receive(Equal(3))) + + stringChannel := make(chan []string, 1) + stringChannel <- []string{"foo", "bar", "baz"} + Expect(stringChannel).Should(Receive(ContainElement(ContainSubstring("fo")))) + + stringChannel <- []string{"foo", "bar", "baz"} + Expect(stringChannel).ShouldNot(Receive(ContainElement(ContainSubstring("archipelago")))) + }) + + It("should defer to the underlying matcher for the message", func() { + matcher := Receive(Equal(3)) + channel := make(chan int, 1) + channel <- 2 + matcher.Match(channel) + Expect(matcher.FailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 2\s+to equal\s+: 3`)) + + channel <- 3 + matcher.Match(channel) + 
Expect(matcher.NegatedFailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 3\s+not to equal\s+: 3`))
+		})
+
+		It("should work just fine with Eventually", func() {
+			stringChannel := make(chan string)
+
+			go func() {
+				time.Sleep(5 * time.Millisecond)
+				stringChannel <- "A"
+				time.Sleep(5 * time.Millisecond)
+				stringChannel <- "B"
+			}()
+
+			Eventually(stringChannel).Should(Receive(Equal("B")))
+		})
+
+		Context("if the matcher errors", func() {
+			It("should error", func() {
+				channel := make(chan int, 1)
+				channel <- 3
+				success, err := (&ReceiveMatcher{Arg: ContainSubstring("three")}).Match(channel)
+				Expect(success).Should(BeFalse())
+				Expect(err).Should(HaveOccurred())
+			})
+		})
+
+		Context("if nothing is received", func() {
+			It("should fail", func() {
+				channel := make(chan int, 1)
+				success, err := (&ReceiveMatcher{Arg: Equal(1)}).Match(channel)
+				Expect(success).Should(BeFalse())
+				Expect(err).ShouldNot(HaveOccurred())
+			})
+		})
+	})
+
+	Context("When actual is a *closed* channel", func() {
+		Context("for a buffered channel", func() {
+			It("should work until it hits the end of the buffer", func() {
+				channel := make(chan bool, 1)
+				channel <- true
+
+				close(channel)
+
+				Expect(channel).Should(Receive())
+				Expect(channel).ShouldNot(Receive())
+			})
+		})
+
+		Context("for an unbuffered channel", func() {
+			It("should always fail", func() {
+				channel := make(chan bool)
+				close(channel)
+
+				Expect(channel).ShouldNot(Receive())
+			})
+		})
+	})
+
+	Context("When actual is a send-only channel", func() {
+		It("should error", func() {
+			channel := make(chan bool)
+
+			var writerChannel chan<- bool
+			writerChannel = channel
+
+			success, err := (&ReceiveMatcher{}).Match(writerChannel)
+			Expect(success).Should(BeFalse())
+			Expect(err).Should(HaveOccurred())
+		})
+	})
+
+	Context("when actual is a non-channel", func() {
+		It("should error", func() {
+			var nilChannel chan bool
+
+			success, err := (&ReceiveMatcher{}).Match(nilChannel)
+			Expect(success).Should(BeFalse())
+			Expect(err).Should(HaveOccurred())
+
+			success, err = (&ReceiveMatcher{}).Match(nil)
+			Expect(success).Should(BeFalse())
+			Expect(err).Should(HaveOccurred())
+
+			success, err = (&ReceiveMatcher{}).Match(3)
+			Expect(success).Should(BeFalse())
+			Expect(err).Should(HaveOccurred())
+		})
+	})
+
+	Describe("when used with eventually and a custom matcher", func() {
+		It("should return the matcher's error when a failing value is received on the channel, instead of the must receive something failure", func() {
+			failures := InterceptGomegaFailures(func() {
+				c := make(chan string, 0)
+				Eventually(c, 0.01).Should(Receive(Equal("hello")))
+			})
+			Expect(failures[0]).Should(ContainSubstring("When passed a matcher, ReceiveMatcher's channel *must* receive something."))
+
+			failures = InterceptGomegaFailures(func() {
+				c := make(chan string, 1)
+				c <- "hi"
+				Eventually(c, 0.01).Should(Receive(Equal("hello")))
+			})
+			Expect(failures[0]).Should(ContainSubstring(": hello"))
+		})
+	})
+
+	Describe("Bailing early", func() {
+		It("should bail early when passed a closed channel", func() {
+			c := make(chan bool)
+			close(c)
+
+			t := time.Now()
+			failures := InterceptGomegaFailures(func() {
+				Eventually(c).Should(Receive())
+			})
+			Expect(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
+			Expect(failures).Should(HaveLen(1))
+		})
+
+		It("should bail early when passed a non-channel", func() {
+			t := time.Now()
+			failures := InterceptGomegaFailures(func() {
+				Eventually(3).Should(Receive())
+			})
+			Expect(time.Since(t)).Should(BeNumerically("<", 
500*time.Millisecond))
+			Expect(failures).Should(HaveLen(1))
+		})
+	})
+}) diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go new file mode 100644 index 000000000..721ed5529 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -0,0 +1,33 @@ +package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+type SucceedMatcher struct {
+}
+
+func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+	// is purely nil?
+	if actual == nil {
+		return true, nil
+	}
+
+	// must be an 'error' type
+	if !isError(actual) {
+		return false, fmt.Errorf("Expected an error-type.  Got:\n%s", format.Object(actual, 1))
+	}
+
+	// must be nil (or a pointer to a nil)
+	return isNil(actual), nil
+}
+
+func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
+}
+
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return "Expected failure, but got no error."
+} diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go new file mode 100644 index 000000000..21ea64c80 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go @@ -0,0 +1,62 @@ +package matchers_test
+
+import (
+	"errors"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/matchers"
+)
+
+func Erroring() error {
+	return errors.New("bam")
+}
+
+func NotErroring() error {
+	return nil
+}
+
+type AnyType struct{}
+
+func Invalid() *AnyType {
+	return nil
+}
+
+var _ = Describe("Succeed", func() {
+	It("should succeed if the function succeeds", func() {
+		Expect(NotErroring()).Should(Succeed())
+	})
+
+	It("should succeed (in the negated) if the function errored", func() {
+		Expect(Erroring()).ShouldNot(Succeed())
+	})
+
+	It("should not succeed if passed a non-error", func() {
+		success, err := (&SucceedMatcher{}).Match(Invalid())
+		Expect(success).Should(BeFalse())
+		Expect(err).Should(MatchError("Expected an error-type.  Got:\n    <*matchers_test.AnyType | 0x0>: nil"))
+	})
+
+	It("doesn't support non-error type", func() {
+		success, err := (&SucceedMatcher{}).Match(AnyType{})
+		Expect(success).Should(BeFalse())
+		Expect(err).Should(MatchError("Expected an error-type.  Got:\n    <matchers_test.AnyType>: {}"))
+	})
+
+	It("doesn't support non-error pointer type", func() {
+		success, err := (&SucceedMatcher{}).Match(&AnyType{})
+		Expect(success).Should(BeFalse())
+		Expect(err).Should(MatchError(MatchRegexp(`Expected an error-type. 
Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`))) + }) + + It("should not succeed with pointer types that conform to error interface", func() { + err := &CustomErr{"ohai"} + Expect(err).ShouldNot(Succeed()) + }) + + It("should succeed with nil pointers to types that conform to error interface", func() { + var err *CustomErr = nil + Expect(err).Should(Succeed()) + }) + +}) diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE b/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE new file mode 100644 index 000000000..8edd8175a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2014 Amit Kumar Gupta + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go new file mode 100644 index 000000000..81b377111 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -0,0 +1,41 @@ +package bipartitegraph + +import "errors" +import "fmt" + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . 
"github.com/onsi/gomega/matchers/support/goraph/edge" + +type BipartiteGraph struct { + Left NodeOrderedSet + Right NodeOrderedSet + Edges EdgeSet +} + +func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { + left := NodeOrderedSet{} + for i, _ := range leftValues { + left = append(left, Node{Id: i}) + } + + right := NodeOrderedSet{} + for j, _ := range rightValues { + right = append(right, Node{Id: j + len(left)}) + } + + edges := EdgeSet{} + for i, leftValue := range leftValues { + for j, rightValue := range rightValues { + neighbours, err := neighbours(leftValue, rightValue) + if err != nil { + return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) + } + + if neighbours { + edges = append(edges, Edge{Node1: left[i], Node2: right[j]}) + } + } + } + + return &BipartiteGraph{left, right, edges}, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go new file mode 100644 index 000000000..8181f43a4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -0,0 +1,159 @@ +package bipartitegraph + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . "github.com/onsi/gomega/matchers/support/goraph/edge" +import "github.com/onsi/gomega/matchers/support/goraph/util" + +func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { + paths := bg.maximalDisjointSLAPCollection(matching) + + for len(paths) > 0 { + for _, path := range paths { + matching = matching.SymmetricDifference(path) + } + paths = bg.maximalDisjointSLAPCollection(matching) + } + + return +} + +func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) { + guideLayers := bg.createSLAPGuideLayers(matching) + if len(guideLayers) == 0 { + return + } + + used := make(map[Node]bool) + + for _, u := range guideLayers[len(guideLayers)-1] { + slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) + if found { + for _, edge := range slap { + used[edge.Node1] = true + used[edge.Node2] = true + } + result = append(result, slap) + } + } + + return +} + +func (bg *BipartiteGraph) findDisjointSLAP( + start Node, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) ([]Edge, bool) { + return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) +} + +func (bg *BipartiteGraph) findDisjointSLAPHelper( + currentNode Node, + currentSLAP EdgeSet, + currentLevel int, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) (EdgeSet, bool) { + used[currentNode] = true + + if currentLevel == 0 { + return currentSLAP, true + } + + for _, nextNode := range guideLayers[currentLevel-1] { + if used[nextNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(currentNode, nextNode) + if !found { + continue + } + + if matching.Contains(edge) == util.Odd(currentLevel) { + continue + } + + currentSLAP = append(currentSLAP, edge) + slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used) + if found { + return slap, true + } + currentSLAP = currentSLAP[:len(currentSLAP)-1] + } + + used[currentNode] = false + return nil, false +} + +func (bg *BipartiteGraph) createSLAPGuideLayers(matching 
EdgeSet) (guideLayers []NodeOrderedSet) { + used := make(map[Node]bool) + currentLayer := NodeOrderedSet{} + + for _, node := range bg.Left { + if matching.Free(node) { + used[node] = true + currentLayer = append(currentLayer, node) + } + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + + done := false + + for !done { + lastLayer := currentLayer + currentLayer = NodeOrderedSet{} + + if util.Odd(len(guideLayers)) { + for _, leftNode := range lastLayer { + for _, rightNode := range bg.Right { + if used[rightNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, rightNode) + used[rightNode] = true + + if matching.Free(rightNode) { + done = true + } + } + } + } else { + for _, rightNode := range lastLayer { + for _, leftNode := range bg.Left { + if used[leftNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || !matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, leftNode) + used[leftNode] = true + } + } + + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + } + + return +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go new file mode 100644 index 000000000..4fd15cc06 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -0,0 +1,61 @@ +package edge + +import . "github.com/onsi/gomega/matchers/support/goraph/node" + +type Edge struct { + Node1 Node + Node2 Node +} + +type EdgeSet []Edge + +func (ec EdgeSet) Free(node Node) bool { + for _, e := range ec { + if e.Node1 == node || e.Node2 == node { + return false + } + } + + return true +} + +func (ec EdgeSet) Contains(edge Edge) bool { + for _, e := range ec { + if e == edge { + return true + } + } + + return false +} + +func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { + for _, e := range ec { + if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { + return e, true + } + } + + return Edge{}, false +} + +func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet { + edgesToInclude := make(map[Edge]bool) + + for _, e := range ec { + edgesToInclude[e] = true + } + + for _, e := range ec2 { + edgesToInclude[e] = !edgesToInclude[e] + } + + result := EdgeSet{} + for e, include := range edgesToInclude { + if include { + result = append(result, e) + } + } + + return result +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go new file mode 100644 index 000000000..800c2ea8c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -0,0 +1,7 @@ +package node + +type Node struct { + Id int +} + +type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go new file mode 100644 index 000000000..d76a1ee00 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go @@ -0,0 +1,7 @@ +package util + +import "math" + +func Odd(n int) bool { + return math.Mod(float64(n), 2.0) == 1.0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml 
b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml new file mode 100644 index 000000000..90f0a1b45 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml @@ -0,0 +1,6 @@ + + Tove + Jani + Reminder + Don't forget me this weekend! + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml new file mode 100644 index 000000000..3863b83c3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml @@ -0,0 +1,9 @@ + + + + Tove + Jani + Reminder + Don't forget me this weekend! + + diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml new file mode 100644 index 000000000..a491c213c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml @@ -0,0 +1 @@ + Tove Jani Reminder Don't forget me this weekend! diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml new file mode 100644 index 000000000..dcfd3db03 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml @@ -0,0 +1,6 @@ + + Tove + John + Doe + Don't forget me this weekend! + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml new file mode 100644 index 000000000..de15a6a55 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml @@ -0,0 +1,211 @@ + + + + Empire Burlesque + Bob Dylan + USA + Columbia + 10.90 + 1985 + + + Hide your heart + Bonnie Tyler + UK + CBS Records + 9.90 + 1988 + + + Greatest Hits + Dolly Parton + USA + RCA + 9.90 + 1982 + + + Still got the blues + Gary Moore + UK + Virgin records + 10.20 + 1990 + + + Eros + Eros Ramazzotti + EU + BMG + 9.90 + 1997 + + + One night only + Bee Gees + UK + Polydor + 10.90 + 1998 + + + Sylvias Mother + Dr.Hook + UK + CBS + 8.10 + 1973 + + + Maggie May + Rod Stewart + UK + Pickwick + 8.50 + 1990 + + + Romanza + Andrea Bocelli + EU + Polydor + 10.80 + 1996 + + + When a man loves a woman + Percy Sledge + USA + Atlantic + 8.70 + 1987 + + + Black angel + Savage Rose + EU + Mega + 10.90 + 1995 + + + 1999 Grammy Nominees + Many + USA + Grammy + 10.20 + 1999 + + + For the good times + Kenny Rogers + UK + Mucik Master + 8.70 + 1995 + + + Big Willie style + Will Smith + USA + Columbia + 9.90 + 1997 + + + Tupelo Honey + Van Morrison + UK + Polydor + 8.20 + 1971 + + + Soulsville + Jorn Hoel + Norway + WEA + 7.90 + 1996 + + + The very best of + Cat Stevens + UK + Island + 8.90 + 1990 + + + Stop + Sam Brown + UK + A and M + 8.90 + 1988 + + + Bridge of Spies + T'Pau + UK + Siren + 7.90 + 1987 + + + Private Dancer + Tina Turner + UK + Capitol + 8.90 + 1983 + + + Midt om natten + Kim Larsen + EU + Medley + 7.80 + 1983 + + + Pavarotti Gala Concert + Luciano Pavarotti + UK + DECCA + 9.90 + 1991 + + + The dock of the bay + Otis Redding + USA + Stax Records + 7.90 + 1968 + + + Picture book + Simply Red + EU + Elektra + 7.20 + 1985 + + + Red + The Communards + UK + London + 7.80 + 1987 + + + Unchain my heart + Joe Cocker + USA + EMI + 8.20 + 1987 + + diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml new file mode 100644 index 
000000000..4ba90fb97 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml @@ -0,0 +1,13 @@
+<root xmlns:h="http://www.w3.org/TR/html4/" xmlns:f="http://www.w3schools.com/furniture">
+  <h:table>
+    <h:tr>
+      <h:td>Apples</h:td>
+      <h:td>Bananas</h:td>
+    </h:tr>
+  </h:table>
+  <f:table>
+    <f:name>African Coffee Table</f:name>
+    <f:width>80</f:width>
+    <f:length>120</f:length>
+  </f:table>
+</root>
\ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml new file mode 100644 index 000000000..34b9e9775 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml @@ -0,0 +1,13 @@ + + + + Apples + Bananas + + + + African Coffee Table + 80 + 120 + + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml new file mode 100644 index 000000000..ccaee4e1a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml @@ -0,0 +1,13 @@ + + + + Apples + Oranges + + + + African Coffee Table + 80 + 120 + + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml new file mode 100644 index 000000000..531f84d3f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml @@ -0,0 +1,4 @@ + + Foo + Bar + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml new file mode 100644 index 000000000..b1e1e1fbe --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml @@ -0,0 +1,4 @@ + + Foo + Bar + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml new file mode 100644 index 000000000..3132b0f90 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml @@ -0,0 +1,7 @@ + + + Tove + Jani + Reminder + Don't forget me this weekend! + diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go new file mode 100644 index 000000000..b05a5e75d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -0,0 +1,173 @@ +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
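+
+The numeric helpers below coerce across integer, unsigned and float kinds, so
+matchers can compare values of different numeric types. For instance
+(illustrative):
+
+	toFloat(int8(3))     // 3.0
+	toInteger(uint16(7)) // 7
+	isNumber("3")        // false: strings are never numbers here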
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ +package matchers + +import ( + "fmt" + "reflect" +) + +type omegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} + +func isBool(a interface{}) bool { + return reflect.TypeOf(a).Kind() == reflect.Bool +} + +func isNumber(a interface{}) bool { + if a == nil { + return false + } + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Float64 +} + +func isInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Int64 +} + +func isUnsignedInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Uint <= kind && kind <= reflect.Uint64 +} + +func isFloat(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Float32 <= kind && kind <= reflect.Float64 +} + +func toInteger(a interface{}) int64 { + if isInteger(a) { + return reflect.ValueOf(a).Int() + } else if isUnsignedInteger(a) { + return int64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return int64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toUnsignedInteger(a interface{}) uint64 { + if isInteger(a) { + return uint64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return reflect.ValueOf(a).Uint() + } else if isFloat(a) { + return uint64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toFloat(a interface{}) float64 { + if isInteger(a) { + return float64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return float64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return reflect.ValueOf(a).Float() + } + panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) +} + +func isError(a interface{}) bool { + _, ok := a.(error) + return ok +} + +func isChan(a interface{}) bool { + if isNil(a) { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Chan +} + +func isMap(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Map +} + +func isArrayOrSlice(a interface{}) bool { + if a == nil { + return false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} + +func isString(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.String +} + +func toString(a interface{}) (string, bool) { + aString, isString := a.(string) + if isString { + return aString, true + } + + aBytes, isBytes := a.([]byte) + if isBytes { + return string(aBytes), true + } + + aStringer, isStringer := a.(fmt.Stringer) + if isStringer { + return aStringer.String(), true + } + + return "", false +} + +func lengthOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Len(), true + default: + return 0, false + } +} +func capOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Cap(), true + default: + return 0, false + } +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go new file mode 100644 index 000000000..8e58d8a0f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -0,0 +1,72 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type WithTransformMatcher struct { + // input + Transform interface{} // must be a function of one parameter that returns one value + Matcher types.GomegaMatcher + + // cached value + transformArgType reflect.Type + + // state + transformedValue interface{} +} + +func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { + if transform == nil { + panic("transform function cannot be nil") + } + txType := reflect.TypeOf(transform) + if txType.NumIn() != 1 { + panic("transform function must have 1 argument") + } + if txType.NumOut() != 1 { + panic("transform function must have 1 return value") + } + + return &WithTransformMatcher{ + Transform: transform, + Matcher: matcher, + transformArgType: reflect.TypeOf(transform).In(0), + } +} + +func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { + // return error if actual's type is incompatible with Transform function's argument type + actualType := reflect.TypeOf(actual) + if !actualType.AssignableTo(m.transformArgType) { + return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + } + + // call the Transform function with `actual` + fn := reflect.ValueOf(m.Transform) + result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + m.transformedValue = 
result[0].Interface() // expect exactly one value + + return m.Matcher.Match(m.transformedValue) +} + +func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { + return m.Matcher.FailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { + // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) + // + // Querying the next matcher is fine if the transformer always will return the same value. + // But if the transformer is non-deterministic and returns a different value each time, then there + // is no point in querying the next matcher, since it can only comment on the last transformed value. + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform_test.go b/vendor/github.com/onsi/gomega/matchers/with_transform_test.go new file mode 100644 index 000000000..e52bf8e63 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform_test.go @@ -0,0 +1,102 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("WithTransformMatcher", func() { + + var plus1 = func(i int) int { return i + 1 } + + Context("Panic if transform function invalid", func() { + panicsWithTransformer := func(transform interface{}) { + ExpectWithOffset(1, func() { WithTransform(transform, nil) }).To(Panic()) + } + It("nil", func() { + panicsWithTransformer(nil) + }) + Context("Invalid number of args, but correct return value count", func() { + It("zero", func() { + panicsWithTransformer(func() int { return 5 }) + }) + It("two", func() { + panicsWithTransformer(func(i, j int) int { return 5 }) + }) + }) + Context("Invalid number of return values, but correct number of arguments", func() { + It("zero", func() { + panicsWithTransformer(func(i int) {}) + }) + It("two", func() { + panicsWithTransformer(func(i int) (int, int) { return 5, 6 }) + }) + }) + }) + + It("works with positive cases", func() { + Expect(1).To(WithTransform(plus1, Equal(2))) + Expect(1).To(WithTransform(plus1, WithTransform(plus1, Equal(3)))) + Expect(1).To(WithTransform(plus1, And(Equal(2), BeNumerically(">", 1)))) + + // transform expects custom type + type S struct { + A int + B string + } + transformer := func(s S) string { return s.B } + Expect(S{1, "hi"}).To(WithTransform(transformer, Equal("hi"))) + + // transform expects interface + errString := func(e error) string { return e.Error() } + Expect(errors.New("abc")).To(WithTransform(errString, Equal("abc"))) + }) + + It("works with negative cases", func() { + Expect(1).ToNot(WithTransform(plus1, Equal(3))) + Expect(1).ToNot(WithTransform(plus1, WithTransform(plus1, Equal(2)))) + }) + + Context("failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + m := WithTransform(plus1, Equal(3)) + Expect(m.Match(1)).To(BeFalse()) + Expect(m.FailureMessage(1)).To(Equal("Expected\n : 2\nto equal\n : 3")) + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + m := Not(WithTransform(plus1, Equal(3))) + Expect(m.Match(2)).To(BeFalse()) + Expect(m.FailureMessage(2)).To(Equal("Expected\n : 3\nnot 
to equal\n : 3")) + }) + }) + + Context("actual value is incompatible with transform function's argument type", func() { + It("gracefully fails if transform cannot be performed", func() { + m := WithTransform(plus1, Equal(3)) + result, err := m.Match("hi") // give it a string but transform expects int; doesn't panic + Expect(result).To(BeFalse()) + Expect(err).To(MatchError("Transform function expects 'int' but we have 'string'")) + }) + }) + }) + + Context("MatchMayChangeInTheFuture()", func() { + It("Propagates value from wrapped matcher on the transformed value", func() { + m := WithTransform(plus1, Or()) // empty Or() always returns false, and indicates it cannot change + Expect(m.Match(1)).To(BeFalse()) + Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeFalse()) // empty Or() indicates cannot change + }) + It("Defaults to true", func() { + m := WithTransform(plus1, Equal(2)) // Equal does not have this method + Expect(m.Match(1)).To(BeTrue()) + Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeTrue()) // defaults to true + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go new file mode 100644 index 000000000..51256ede0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -0,0 +1,17 @@ +package types + +type GomegaFailHandler func(message string, callerSkip ...int) + +//A simple *testing.T interface wrapper +type GomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +//All Gomega matchers must implement the GomegaMatcher interface +// +//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers +type GomegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore new file mode 100644 index 000000000..99e38bbc5 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.gitignore @@ -0,0 +1,2 @@ +test_program/test_program_bin +fuzz/ diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml new file mode 100644 index 000000000..ab2775d7d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.travis.yml @@ -0,0 +1,22 @@ +sudo: false +language: go +go: + - 1.8.5 + - 1.9.2 + - tip +matrix: + allow_failures: + - go: tip + fast_finish: true +script: + - if [ -n "$(go fmt ./...)" ]; then exit 1; fi + - ./test.sh + - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +branches: + only: [master] +after_success: + - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 000000000..583bdae62 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md new file mode 100644 index 000000000..0d357acf3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -0,0 +1,131 @@ +# go-toml + +Go library for the [TOML](https://github.com/mojombo/toml) format. 
+
+This library supports TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+
+[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
+[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
+[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
+[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
+
+## Features
+
+Go-toml provides the following features for using data parsed from TOML documents:
+
+* Load TOML documents from files and string data
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
+* Line & column position data for all parsed elements
+* [Query support similar to JSON-Path](query/)
+* Syntax errors contain line and column numbers
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml"
+```
+
+## Usage example
+
+Read a TOML document:
+
+```go
+config, _ := toml.Load(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
+
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password := postgresConfig.Get("password").(string)
+```
+
+Or use Unmarshal:
+
+```go
+type Postgres struct {
+    User     string
+    Password string
+}
+type Config struct {
+    Postgres Postgres
+}
+
+doc := []byte(`
+[Postgres]
+User = "pelletier"
+Password = "mypassword"`)
+
+config := Config{}
+toml.Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
+```
+
+Or use a query:
+
+```go
+// use a query to gather elements without walking the tree
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
+for ii, item := range results.Values() {
+    fmt.Printf("Query result %d: %v\n", ii, item)
+}
+```
+
+## Documentation
+
+The documentation and additional examples are available at
+[godoc.org](http://godoc.org/github.com/pelletier/go-toml).
+
+## Tools
+
+Go-toml provides two handy command line tools:
+
+* `tomll`: Reads TOML files and lints them.
+
+    ```
+    go install github.com/pelletier/go-toml/cmd/tomll
+    tomll --help
+    ```
+* `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+    ```
+    go install github.com/pelletier/go-toml/cmd/tomljson
+    tomljson --help
+    ```
+
+## Contribute
+
+Feel free to report bugs and submit patches using GitHub's pull request system on
+[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be
+much appreciated!
+
+### Run tests
+
+You have to make sure two kinds of tests run:
+
+1. The Go unit tests
+2. The TOML examples base
+
+You can run both of them using `./test.sh`.
+
+### Fuzzing
+
+The script `./fuzz.sh` is available to
+run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
+
+## Versioning
+
+Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
+of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
+this document. The last two major versions of Go are supported
+(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT).
Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json new file mode 100644 index 000000000..86f99c6a8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.json @@ -0,0 +1,164 @@ +{ + "array": { + "key1": [ + 1, + 2, + 3 + ], + "key2": [ + "red", + "yellow", + "green" + ], + "key3": [ + [ + 1, + 2 + ], + [ + 3, + 4, + 5 + ] + ], + "key4": [ + [ + 1, + 2 + ], + [ + "a", + "b", + "c" + ] + ], + "key5": [ + 1, + 2, + 3 + ], + "key6": [ + 1, + 2 + ] + }, + "boolean": { + "False": false, + "True": true + }, + "datetime": { + "key1": "1979-05-27T07:32:00Z", + "key2": "1979-05-27T00:32:00-07:00", + "key3": "1979-05-27T00:32:00.999999-07:00" + }, + "float": { + "both": { + "key": 6.626e-34 + }, + "exponent": { + "key1": 5e+22, + "key2": 1000000, + "key3": -0.02 + }, + "fractional": { + "key1": 1, + "key2": 3.1415, + "key3": -0.01 + }, + "underscores": { + "key1": 9224617.445991227, + "key2": 1e+100 + } + }, + "fruit": [{ + "name": "apple", + "physical": { + "color": "red", + "shape": "round" + }, + "variety": [{ + "name": "red delicious" + }, + { + "name": "granny smith" + } + ] + }, + { + "name": "banana", + "variety": [{ + "name": "plantain" + }] + } + ], + "integer": { + "key1": 99, + "key2": 42, + "key3": 0, + "key4": -17, + "underscores": { + "key1": 1000, + "key2": 5349221, + "key3": 12345 + } + }, + "products": [{ + "name": "Hammer", + "sku": 738594937 + }, + {}, + { + "color": "gray", + "name": "Nail", + "sku": 284758393 + } + ], + "string": { + "basic": { + "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." + }, + "literal": { + "multiline": { + "lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n", + "regex2": "I [dw]on't need \\d{2} apples" + }, + "quoted": "Tom \"Dubs\" Preston-Werner", + "regex": "\u003c\\i\\c*\\s*\u003e", + "winpath": "C:\\Users\\nodejs\\templates", + "winpath2": "\\\\ServerX\\admin$\\system32\\" + }, + "multiline": { + "continued": { + "key1": "The quick brown fox jumps over the lazy dog.", + "key2": "The quick brown fox jumps over the lazy dog.", + "key3": "The quick brown fox jumps over the lazy dog." + }, + "key1": "One\nTwo", + "key2": "One\nTwo", + "key3": "One\nTwo" + } + }, + "table": { + "inline": { + "name": { + "first": "Tom", + "last": "Preston-Werner" + }, + "point": { + "x": 1, + "y": 2 + } + }, + "key": "value", + "subtable": { + "key": "another value" + } + }, + "x": { + "y": { + "z": { + "w": {} + } + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh new file mode 100755 index 000000000..8b8bb528e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e + +reference_ref=${1:-master} +reference_git=${2:-.} + +if ! `hash benchstat 2>/dev/null`; then + echo "Installing benchstat" + go get golang.org/x/perf/cmd/benchstat + go install golang.org/x/perf/cmd/benchstat +fi + +tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` +ref_tempdir="${tempdir}/ref" +ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" +local_benchmark="`pwd`/benchmark-local.txt" + +echo "=== ${reference_ref} (${ref_tempdir})" +git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null +pushd ${ref_tempdir} >/dev/null +git checkout ${reference_ref} >/dev/null 2>/dev/null +go test -bench=. 
-benchmem | tee ${ref_benchmark} +popd >/dev/null + +echo "" +echo "=== local" +go test -bench=. -benchmem | tee ${local_benchmark} + +echo "" +echo "=== diff" +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml new file mode 100644 index 000000000..dfd77e096 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.toml @@ -0,0 +1,244 @@ +################################################################################ +## Comment + +# Speak your mind with the hash symbol. They go from the symbol to the end of +# the line. + + +################################################################################ +## Table + +# Tables (also known as hash tables or dictionaries) are collections of +# key/value pairs. They appear in square brackets on a line by themselves. + +[table] + +key = "value" # Yeah, you can do this. + +# Nested tables are denoted by table names with dots in them. Name your tables +# whatever crap you please, just don't use #, ., [ or ]. + +[table.subtable] + +key = "another value" + +# You don't need to specify all the super-tables if you don't want to. TOML +# knows how to do it for you. + +# [x] you +# [x.y] don't +# [x.y.z] need these +[x.y.z.w] # for this to work + + +################################################################################ +## Inline Table + +# Inline tables provide a more compact syntax for expressing tables. They are +# especially useful for grouped data that can otherwise quickly become verbose. +# Inline tables are enclosed in curly braces `{` and `}`. No newlines are +# allowed between the curly braces unless they are valid within a value. + +[table.inline] + +name = { first = "Tom", last = "Preston-Werner" } +point = { x = 1, y = 2 } + + +################################################################################ +## String + +# There are four ways to express strings: basic, multi-line basic, literal, and +# multi-line literal. All strings must contain only valid UTF-8 characters. + +[string.basic] + +basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF." + +[string.multiline] + +# The following strings are byte-for-byte equivalent: +key1 = "One\nTwo" +key2 = """One\nTwo""" +key3 = """ +One +Two""" + +[string.multiline.continued] + +# The following strings are byte-for-byte equivalent: +key1 = "The quick brown fox jumps over the lazy dog." + +key2 = """ +The quick brown \ + + + fox jumps over \ + the lazy dog.""" + +key3 = """\ + The quick brown \ + fox jumps over \ + the lazy dog.\ + """ + +[string.literal] + +# What you see is what you get. +winpath = 'C:\Users\nodejs\templates' +winpath2 = '\\ServerX\admin$\system32\' +quoted = 'Tom "Dubs" Preston-Werner' +regex = '<\i\c*\s*>' + + +[string.literal.multiline] + +regex2 = '''I [dw]on't need \d{2} apples''' +lines = ''' +The first newline is +trimmed in raw strings. + All other whitespace + is preserved. +''' + + +################################################################################ +## Integer + +# Integers are whole numbers. Positive numbers may be prefixed with a plus sign. +# Negative numbers are prefixed with a minus sign. + +[integer] + +key1 = +99 +key2 = 42 +key3 = 0 +key4 = -17 + +[integer.underscores] + +# For large numbers, you may use underscores to enhance readability. Each +# underscore must be surrounded by at least one digit. 
+key1 = 1_000 +key2 = 5_349_221 +key3 = 1_2_3_4_5 # valid but inadvisable + + +################################################################################ +## Float + +# A float consists of an integer part (which may be prefixed with a plus or +# minus sign) followed by a fractional part and/or an exponent part. + +[float.fractional] + +key1 = +1.0 +key2 = 3.1415 +key3 = -0.01 + +[float.exponent] + +key1 = 5e+22 +key2 = 1e6 +key3 = -2E-2 + +[float.both] + +key = 6.626e-34 + +[float.underscores] + +key1 = 9_224_617.445_991_228_313 +key2 = 1e1_00 + + +################################################################################ +## Boolean + +# Booleans are just the tokens you're used to. Always lowercase. + +[boolean] + +True = true +False = false + + +################################################################################ +## Datetime + +# Datetimes are RFC 3339 dates. + +[datetime] + +key1 = 1979-05-27T07:32:00Z +key2 = 1979-05-27T00:32:00-07:00 +key3 = 1979-05-27T00:32:00.999999-07:00 + + +################################################################################ +## Array + +# Arrays are square brackets with other primitives inside. Whitespace is +# ignored. Elements are separated by commas. Data types may not be mixed. + +[array] + +key1 = [ 1, 2, 3 ] +key2 = [ "red", "yellow", "green" ] +key3 = [ [ 1, 2 ], [3, 4, 5] ] +#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok + +# Arrays can also be multiline. So in addition to ignoring whitespace, arrays +# also ignore newlines between the brackets. Terminating commas are ok before +# the closing bracket. + +key5 = [ + 1, 2, 3 +] +key6 = [ + 1, + 2, # this is ok +] + + +################################################################################ +## Array of Tables + +# These can be expressed by using a table name in double brackets. Each table +# with the same double bracketed name will be an element in the array. The +# tables are inserted in the order encountered. + +[[products]] + +name = "Hammer" +sku = 738594937 + +[[products]] + +[[products]] + +name = "Nail" +sku = 284758393 +color = "gray" + + +# You can create nested arrays of tables as well. 
+ +[[fruit]] + name = "apple" + + [fruit.physical] + color = "red" + shape = "round" + + [[fruit.variety]] + name = "red delicious" + + [[fruit.variety]] + name = "granny smith" + +[[fruit]] + name = "banana" + + [[fruit.variety]] + name = "plantain" diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml new file mode 100644 index 000000000..0bd19f08a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.yml @@ -0,0 +1,121 @@ +--- +array: + key1: + - 1 + - 2 + - 3 + key2: + - red + - yellow + - green + key3: + - - 1 + - 2 + - - 3 + - 4 + - 5 + key4: + - - 1 + - 2 + - - a + - b + - c + key5: + - 1 + - 2 + - 3 + key6: + - 1 + - 2 +boolean: + 'False': false + 'True': true +datetime: + key1: '1979-05-27T07:32:00Z' + key2: '1979-05-27T00:32:00-07:00' + key3: '1979-05-27T00:32:00.999999-07:00' +float: + both: + key: 6.626e-34 + exponent: + key1: 5.0e+22 + key2: 1000000 + key3: -0.02 + fractional: + key1: 1 + key2: 3.1415 + key3: -0.01 + underscores: + key1: 9224617.445991227 + key2: 1.0e+100 +fruit: +- name: apple + physical: + color: red + shape: round + variety: + - name: red delicious + - name: granny smith +- name: banana + variety: + - name: plantain +integer: + key1: 99 + key2: 42 + key3: 0 + key4: -17 + underscores: + key1: 1000 + key2: 5349221 + key3: 12345 +products: +- name: Hammer + sku: 738594937 +- {} +- color: gray + name: Nail + sku: 284758393 +string: + basic: + basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." + literal: + multiline: + lines: | + The first newline is + trimmed in raw strings. + All other whitespace + is preserved. + regex2: I [dw]on't need \d{2} apples + quoted: Tom "Dubs" Preston-Werner + regex: "<\\i\\c*\\s*>" + winpath: C:\Users\nodejs\templates + winpath2: "\\\\ServerX\\admin$\\system32\\" + multiline: + continued: + key1: The quick brown fox jumps over the lazy dog. + key2: The quick brown fox jumps over the lazy dog. + key3: The quick brown fox jumps over the lazy dog. 
+ key1: |- + One + Two + key2: |- + One + Two + key3: |- + One + Two +table: + inline: + name: + first: Tom + last: Preston-Werner + point: + x: 1 + y: 2 + key: value + subtable: + key: another value +x: + y: + z: + w: {} diff --git a/vendor/github.com/pelletier/go-toml/benchmark_test.go b/vendor/github.com/pelletier/go-toml/benchmark_test.go new file mode 100644 index 000000000..e1f209dfa --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark_test.go @@ -0,0 +1,192 @@ +package toml + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "testing" + "time" + + burntsushi "github.com/BurntSushi/toml" + yaml "gopkg.in/yaml.v2" +) + +type benchmarkDoc struct { + Table struct { + Key string + Subtable struct { + Key string + } + Inline struct { + Name struct { + First string + Last string + } + Point struct { + X int64 + U int64 + } + } + } + String struct { + Basic struct { + Basic string + } + Multiline struct { + Key1 string + Key2 string + Key3 string + Continued struct { + Key1 string + Key2 string + Key3 string + } + } + Literal struct { + Winpath string + Winpath2 string + Quoted string + Regex string + Multiline struct { + Regex2 string + Lines string + } + } + } + Integer struct { + Key1 int64 + Key2 int64 + Key3 int64 + Key4 int64 + Underscores struct { + Key1 int64 + Key2 int64 + Key3 int64 + } + } + Float struct { + Fractional struct { + Key1 float64 + Key2 float64 + Key3 float64 + } + Exponent struct { + Key1 float64 + Key2 float64 + Key3 float64 + } + Both struct { + Key float64 + } + Underscores struct { + Key1 float64 + Key2 float64 + } + } + Boolean struct { + True bool + False bool + } + Datetime struct { + Key1 time.Time + Key2 time.Time + Key3 time.Time + } + Array struct { + Key1 []int64 + Key2 []string + Key3 [][]int64 + // TODO: Key4 not supported by go-toml's Unmarshal + Key5 []int64 + Key6 []int64 + } + Products []struct { + Name string + Sku int64 + Color string + } + Fruit []struct { + Name string + Physical struct { + Color string + Shape string + Variety []struct { + Name string + } + } + } +} + +func BenchmarkParseToml(b *testing.B) { + fileBytes, err := ioutil.ReadFile("benchmark.toml") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := LoadReader(bytes.NewReader(fileBytes)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkUnmarshalToml(b *testing.B) { + bytes, err := ioutil.ReadFile("benchmark.toml") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + target := benchmarkDoc{} + err := Unmarshal(bytes, &target) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkUnmarshalBurntSushiToml(b *testing.B) { + bytes, err := ioutil.ReadFile("benchmark.toml") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + target := benchmarkDoc{} + err := burntsushi.Unmarshal(bytes, &target) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkUnmarshalJson(b *testing.B) { + bytes, err := ioutil.ReadFile("benchmark.json") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + target := benchmarkDoc{} + err := json.Unmarshal(bytes, &target) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkUnmarshalYaml(b *testing.B) { + bytes, err := ioutil.ReadFile("benchmark.yml") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + target := benchmarkDoc{} + err := yaml.Unmarshal(bytes, &target) + if err != nil { + b.Fatal(err) + } + } +} diff --git 
a/vendor/github.com/pelletier/go-toml/cmd/test_program.go b/vendor/github.com/pelletier/go-toml/cmd/test_program.go new file mode 100644 index 000000000..73077f614 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/cmd/test_program.go @@ -0,0 +1,91 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "time" + + "github.com/pelletier/go-toml" +) + +func main() { + bytes, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatalf("Error during TOML read: %s", err) + os.Exit(2) + } + tree, err := toml.Load(string(bytes)) + if err != nil { + log.Fatalf("Error during TOML load: %s", err) + os.Exit(1) + } + + typedTree := translate(*tree) + + if err := json.NewEncoder(os.Stdout).Encode(typedTree); err != nil { + log.Fatalf("Error encoding JSON: %s", err) + os.Exit(3) + } + + os.Exit(0) +} + +func translate(tomlData interface{}) interface{} { + switch orig := tomlData.(type) { + case map[string]interface{}: + typed := make(map[string]interface{}, len(orig)) + for k, v := range orig { + typed[k] = translate(v) + } + return typed + case *toml.Tree: + return translate(*orig) + case toml.Tree: + keys := orig.Keys() + typed := make(map[string]interface{}, len(keys)) + for _, k := range keys { + typed[k] = translate(orig.GetPath([]string{k})) + } + return typed + case []*toml.Tree: + typed := make([]map[string]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v).(map[string]interface{}) + } + return typed + case []map[string]interface{}: + typed := make([]map[string]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v).(map[string]interface{}) + } + return typed + case []interface{}: + typed := make([]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v) + } + return tag("array", typed) + case time.Time: + return tag("datetime", orig.Format("2006-01-02T15:04:05Z")) + case bool: + return tag("bool", fmt.Sprintf("%v", orig)) + case int64: + return tag("integer", fmt.Sprintf("%d", orig)) + case float64: + return tag("float", fmt.Sprintf("%v", orig)) + case string: + return tag("string", orig) + } + + panic(fmt.Sprintf("Unknown type: %T", tomlData)) +} + +func tag(typeName string, data interface{}) map[string]interface{} { + return map[string]interface{}{ + "type": typeName, + "value": data, + } +} diff --git a/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go new file mode 100644 index 000000000..b2d6fc673 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go @@ -0,0 +1,72 @@ +// Tomljson reads TOML and converts to JSON. 
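+//
+// Internally it is a small pipeline: toml.LoadReader parses stdin or the named
+// file into a *toml.Tree, Tree.ToMap converts it to a map[string]interface{},
+// and encoding/json renders that map with MarshalIndent (see reader and
+// mapToJSON below).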
+// +// Usage: +// cat file.toml | tomljson > file.json +// tomljson file1.toml > file.json +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "os" + + "github.com/pelletier/go-toml" +) + +func main() { + flag.Usage = func() { + fmt.Fprintln(os.Stderr, `tomljson can be used in two ways: +Writing to STDIN and reading from STDOUT: + cat file.toml | tomljson > file.json + +Reading from a file name: + tomljson file.toml +`) + } + flag.Parse() + os.Exit(processMain(flag.Args(), os.Stdin, os.Stdout, os.Stderr)) +} + +func processMain(files []string, defaultInput io.Reader, output io.Writer, errorOutput io.Writer) int { + // read from stdin and print to stdout + inputReader := defaultInput + + if len(files) > 0 { + var err error + inputReader, err = os.Open(files[0]) + if err != nil { + printError(err, errorOutput) + return -1 + } + } + s, err := reader(inputReader) + if err != nil { + printError(err, errorOutput) + return -1 + } + io.WriteString(output, s+"\n") + return 0 +} + +func printError(err error, output io.Writer) { + io.WriteString(output, err.Error()+"\n") +} + +func reader(r io.Reader) (string, error) { + tree, err := toml.LoadReader(r) + if err != nil { + return "", err + } + return mapToJSON(tree) +} + +func mapToJSON(tree *toml.Tree) (string, error) { + treeMap := tree.ToMap() + bytes, err := json.MarshalIndent(treeMap, "", " ") + if err != nil { + return "", err + } + return string(bytes[:]), nil +} diff --git a/vendor/github.com/pelletier/go-toml/cmd/tomljson/main_test.go b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main_test.go new file mode 100644 index 000000000..0b4bdbb11 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main_test.go @@ -0,0 +1,82 @@ +package main + +import ( + "bytes" + "io/ioutil" + "os" + "strings" + "testing" +) + +func expectBufferEquality(t *testing.T, name string, buffer *bytes.Buffer, expected string) { + output := buffer.String() + if output != expected { + t.Errorf("incorrect %s:\n%s\n\nexpected %s:\n%s", name, output, name, expected) + t.Log([]rune(output)) + t.Log([]rune(expected)) + } +} + +func expectProcessMainResults(t *testing.T, input string, args []string, exitCode int, expectedOutput string, expectedError string) { + inputReader := strings.NewReader(input) + outputBuffer := new(bytes.Buffer) + errorBuffer := new(bytes.Buffer) + + returnCode := processMain(args, inputReader, outputBuffer, errorBuffer) + + expectBufferEquality(t, "output", outputBuffer, expectedOutput) + expectBufferEquality(t, "error", errorBuffer, expectedError) + + if returnCode != exitCode { + t.Error("incorrect return code:", returnCode, "expected", exitCode) + } +} + +func TestProcessMainReadFromStdin(t *testing.T) { + input := ` + [mytoml] + a = 42` + expectedOutput := `{ + "mytoml": { + "a": 42 + } +} +` + expectedError := `` + expectedExitCode := 0 + + expectProcessMainResults(t, input, []string{}, expectedExitCode, expectedOutput, expectedError) +} + +func TestProcessMainReadFromFile(t *testing.T) { + input := ` + [mytoml] + a = 42` + + tmpfile, err := ioutil.TempFile("", "example.toml") + if err != nil { + t.Fatal(err) + } + if _, err := tmpfile.Write([]byte(input)); err != nil { + t.Fatal(err) + } + + defer os.Remove(tmpfile.Name()) + + expectedOutput := `{ + "mytoml": { + "a": 42 + } +} +` + expectedError := `` + expectedExitCode := 0 + + expectProcessMainResults(t, ``, []string{tmpfile.Name()}, expectedExitCode, expectedOutput, expectedError) +} + +func TestProcessMainReadFromMissingFile(t *testing.T) { + 
expectedError := `open /this/file/does/not/exist: no such file or directory +` + expectProcessMainResults(t, ``, []string{"/this/file/does/not/exist"}, -1, ``, expectedError) +} diff --git a/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go b/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go new file mode 100644 index 000000000..36c7e3759 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go @@ -0,0 +1,66 @@ +// Tomll is a linter for TOML +// +// Usage: +// cat file.toml | tomll > file_linted.toml +// tomll file1.toml file2.toml # lint the two files in place +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/pelletier/go-toml" +) + +func main() { + flag.Usage = func() { + fmt.Fprintln(os.Stderr, `tomll can be used in two ways: +Writing to STDIN and reading from STDOUT: + cat file.toml | tomll > file.toml + +Reading and updating a list of files: + tomll a.toml b.toml c.toml + +When given a list of files, tomll will modify all files in place without asking. +`) + } + flag.Parse() + // read from stdin and print to stdout + if flag.NArg() == 0 { + s, err := lintReader(os.Stdin) + if err != nil { + io.WriteString(os.Stderr, err.Error()) + os.Exit(-1) + } + io.WriteString(os.Stdout, s) + } else { + // otherwise modify a list of files + for _, filename := range flag.Args() { + s, err := lintFile(filename) + if err != nil { + io.WriteString(os.Stderr, err.Error()) + os.Exit(-1) + } + ioutil.WriteFile(filename, []byte(s), 0644) + } + } +} + +func lintFile(filename string) (string, error) { + tree, err := toml.LoadFile(filename) + if err != nil { + return "", err + } + return tree.String(), nil +} + +func lintReader(r io.Reader) (string, error) { + tree, err := toml.LoadReader(r) + if err != nil { + return "", err + } + return tree.String(), nil +} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 000000000..d5fd98c02 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. 
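+//
+// For example (a sketch mirroring the README; see the query package for the
+// actual API):
+//
+//   q, _ := query.Compile("$..[user,password]")
+//   results := q.Execute(tree)
+//   for _, item := range results.Values() {
+//       fmt.Println(item)
+//   }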
+// +package toml diff --git a/vendor/github.com/pelletier/go-toml/doc_test.go b/vendor/github.com/pelletier/go-toml/doc_test.go new file mode 100644 index 000000000..3b8171b22 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc_test.go @@ -0,0 +1,105 @@ +// code examples for godoc + +package toml_test + +import ( + "fmt" + "log" + + toml "github.com/pelletier/go-toml" +) + +func Example_tree() { + config, err := toml.LoadFile("config.toml") + + if err != nil { + fmt.Println("Error ", err.Error()) + } else { + // retrieve data directly + user := config.Get("postgres.user").(string) + password := config.Get("postgres.password").(string) + + // or using an intermediate object + configTree := config.Get("postgres").(*toml.Tree) + user = configTree.Get("user").(string) + password = configTree.Get("password").(string) + fmt.Println("User is", user, " and password is", password) + + // show where elements are in the file + fmt.Printf("User position: %v\n", configTree.GetPosition("user")) + fmt.Printf("Password position: %v\n", configTree.GetPosition("password")) + } +} + +func Example_unmarshal() { + type Employer struct { + Name string + Phone string + } + type Person struct { + Name string + Age int64 + Employer Employer + } + + document := []byte(` + name = "John" + age = 30 + [employer] + name = "Company Inc." + phone = "+1 234 567 89012" + `) + + person := Person{} + toml.Unmarshal(document, &person) + fmt.Println(person.Name, "is", person.Age, "and works at", person.Employer.Name) + // Output: + // John is 30 and works at Company Inc. +} + +func ExampleMarshal() { + type Postgres struct { + User string `toml:"user"` + Password string `toml:"password"` + Database string `toml:"db" commented:"true" comment:"not used anymore"` + } + type Config struct { + Postgres Postgres `toml:"postgres" comment:"Postgres configuration"` + } + + config := Config{Postgres{User: "pelletier", Password: "mypassword", Database: "old_database"}} + b, err := toml.Marshal(config) + if err != nil { + log.Fatal(err) + } + fmt.Println(string(b)) + // Output: + // # Postgres configuration + // [postgres] + // + // # not used anymore + // # db = "old_database" + // password = "mypassword" + // user = "pelletier" +} + +func ExampleUnmarshal() { + type Postgres struct { + User string + Password string + } + type Config struct { + Postgres Postgres + } + + doc := []byte(` + [postgres] + user = "pelletier" + password = "mypassword"`) + + config := Config{} + toml.Unmarshal(doc, &config) + fmt.Println("user=", config.Postgres.User) + // Output: + // user= pelletier +} diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml new file mode 100644 index 000000000..12950a163 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -0,0 +1,29 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml new file mode 100644 index 000000000..3d902f282 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -0,0 +1,29 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 000000000..14570c8d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh new file mode 100755 index 000000000..3204b4c44 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.sh @@ -0,0 +1,15 @@ +#! /bin/sh +set -eu + +go get github.com/dvyukov/go-fuzz/go-fuzz +go get github.com/dvyukov/go-fuzz/go-fuzz-build + +if [ ! -e toml-fuzz.zip ]; then + go-fuzz-build github.com/pelletier/go-toml +fi + +rm -fr fuzz +mkdir -p fuzz/corpus +cp *.toml fuzz/corpus + +go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 000000000..284db6467 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,85 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "bytes" + "errors" + "fmt" + "unicode" +) + +// Convert the bare key group string to an array. +// The input supports double quotation to allow "." inside the key name, +// but escape sequences are not supported. Lexers must unescape them beforehand. 
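+//
+// Illustrative behaviour (the cases in keysparsing_test.go below are
+// authoritative):
+//
+//   parseKey(`this.is.a.key`)  // → ["this", "is", "a", "key"]
+//   parseKey(`foo."ba.r".baz`) // → ["foo", "ba.r", "baz"]; quotes protect the dot
+//   parseKey(`hello#world`)    // error: invalid bare character: #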
+func parseKey(key string) ([]string, error) {
+	groups := []string{}
+	var buffer bytes.Buffer
+	inQuotes := false
+	wasInQuotes := false
+	ignoreSpace := true
+	expectDot := false
+
+	for _, char := range key {
+		if ignoreSpace {
+			if char == ' ' {
+				continue
+			}
+			ignoreSpace = false
+		}
+		switch char {
+		case '"':
+			if inQuotes {
+				groups = append(groups, buffer.String())
+				buffer.Reset()
+				wasInQuotes = true
+			}
+			inQuotes = !inQuotes
+			expectDot = false
+		case '.':
+			if inQuotes {
+				buffer.WriteRune(char)
+			} else {
+				if !wasInQuotes {
+					if buffer.Len() == 0 {
+						return nil, errors.New("empty table key")
+					}
+					groups = append(groups, buffer.String())
+					buffer.Reset()
+				}
+				ignoreSpace = true
+				expectDot = false
+				wasInQuotes = false
+			}
+		case ' ':
+			if inQuotes {
+				buffer.WriteRune(char)
+			} else {
+				expectDot = true
+			}
+		default:
+			if !inQuotes && !isValidBareChar(char) {
+				return nil, fmt.Errorf("invalid bare character: %c", char)
+			}
+			if !inQuotes && expectDot {
+				return nil, errors.New("invalid key: expected '.' after key segment")
+			}
+			buffer.WriteRune(char)
+			expectDot = false
+		}
+	}
+	if inQuotes {
+		return nil, errors.New("mismatched quotes")
+	}
+	if buffer.Len() > 0 {
+		groups = append(groups, buffer.String())
+	}
+	if len(groups) == 0 {
+		return nil, errors.New("empty key")
+	}
+	return groups, nil
+}
+
+func isValidBareChar(r rune) bool {
+	return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r)
+} diff --git a/vendor/github.com/pelletier/go-toml/keysparsing_test.go b/vendor/github.com/pelletier/go-toml/keysparsing_test.go new file mode 100644 index 000000000..84cb82604 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing_test.go @@ -0,0 +1,63 @@
+package toml
+
+import (
+	"fmt"
+	"testing"
+)
+
+func testResult(t *testing.T, key string, expected []string) {
+	parsed, err := parseKey(key)
+	t.Logf("key=%s expected=%s parsed=%s", key, expected, parsed)
+	if err != nil {
+		t.Fatal("Unexpected error:", err)
+	}
+	if len(expected) != len(parsed) {
+		t.Fatal("Expected length", len(expected), "but", len(parsed), "parsed")
+	}
+	for index, expectedKey := range expected {
+		if expectedKey != parsed[index] {
+			t.Fatal("Expected", expectedKey, "at index", index, "but found", parsed[index])
+		}
+	}
+}
+
+func testError(t *testing.T, key string, expectedError string) {
+	res, err := parseKey(key)
+	if err == nil {
+		t.Fatalf("Expected error, but successfully parsed key %s", res)
+	}
+	if fmt.Sprintf("%s", err) != expectedError {
+		t.Fatalf("Expected error \"%s\", but got \"%s\".", expectedError, err)
+	}
+}
+
+func TestBareKeyBasic(t *testing.T) {
+	testResult(t, "test", []string{"test"})
+}
+
+func TestBareKeyDotted(t *testing.T) {
+	testResult(t, "this.is.a.key", []string{"this", "is", "a", "key"})
+}
+
+func TestDottedKeyBasic(t *testing.T) {
+	testResult(t, "\"a.dotted.key\"", []string{"a.dotted.key"})
+}
+
+func TestBaseKeyPound(t *testing.T) {
+	testError(t, "hello#world", "invalid bare character: #")
+}
+
+func TestQuotedKeys(t *testing.T) {
+	testResult(t, `hello."foo".bar`, []string{"hello", "foo", "bar"})
+	testResult(t, `"hello!"`, []string{"hello!"})
+	testResult(t, `foo."ba.r".baz`, []string{"foo", "ba.r", "baz"})
+
+	// escape sequences must not be converted
+	testResult(t, `"hello\tworld"`, []string{`hello\tworld`})
+}
+
+func TestEmptyKey(t *testing.T) {
+	testError(t, "", "empty key")
+	testError(t, " ", "empty key")
+	testResult(t, `""`, []string{""})
+} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644
index 000000000..d11de4285 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,750 @@ +// TOML lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +var dateRegexp *regexp.Regexp + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + depth int + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if l.depth > 0 { + return l.lexRvalue + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + l.depth++ + return l.lexLeftBracket + case ']': + l.depth-- + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if l.depth == 0 { + return l.lexVoid + } + return l.lexRvalue + case '_': + return l.errorf("cannot start number with underscore") + } + + if l.follow("true") { + return 
l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + possibleDate := l.peekString(35) + dateMatch := dateRegexp.FindString(possibleDate) + if dateMatch != "" { + l.fastForward(len(dateMatch)) + return l.lexDate + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + if isAlphanumeric(next) { + return l.lexKey + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexDate() tomlLexStateFn { + l.emit(tokenDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. 
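+//
+// For example (sketch): lexing the line `"quoted key" = 1` consumes the
+// surrounding quotes, emits a single tokenKey with value `quoted key`, and
+// returns to lexVoid, which then lexes the `=` and the rvalue.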
+func (l *tomlLexer) lexKey() tomlLexStateFn { + growingString := "" + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + growingString += str + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + growingString += str + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + break + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + growingString += string(r) + l.next() + } + l.emitWithValue(tokenKey, growingString) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return growingString, nil + } + + next := l.peek() + if next == eof { + break + } + growingString += string(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
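+// lexString calls this with terminator `"` for basic strings and `"""` for +// multiline basic strings; see lexString below for the triple-quote handling.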
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return growingString, nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + growingString += "\"" + l.next() + case 'n': + growingString += "\n" + l.next() + case 'b': + growingString += "\b" + l.next() + case 'f': + growingString += "\f" + l.next() + case '/': + growingString += "/" + l.next() + case 't': + growingString += "\t" + l.next() + case 'r': + growingString += "\r" + l.next() + case '\\': + growingString += "\\" + l.next() + case 'u': + l.next() + code := "" + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + case 'U': + l.next() + code := "" + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code) + } + growingString += string(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped control character %U", r) + } + l.next() + growingString += string(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain '['") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the key till "]" but only
bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain '['") + default: + l.next() + } + } + return l.errorf("unclosed table key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.'
{ + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +func init() { + dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/lexer_test.go b/vendor/github.com/pelletier/go-toml/lexer_test.go new file mode 100644 index 000000000..cb4913031 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer_test.go @@ -0,0 +1,750 @@ +package toml + +import ( + "reflect" + "testing" +) + +func testFlow(t *testing.T, input string, expectedFlow []token) { + tokens := lexToml([]byte(input)) + if !reflect.DeepEqual(tokens, expectedFlow) { + t.Fatal("Different flows. Expected\n", expectedFlow, "\nGot:\n", tokens) + } +} + +func TestValidKeyGroup(t *testing.T) { + testFlow(t, "[hello world]", []token{ + {Position{1, 1}, tokenLeftBracket, "["}, + {Position{1, 2}, tokenKeyGroup, "hello world"}, + {Position{1, 13}, tokenRightBracket, "]"}, + {Position{1, 14}, tokenEOF, ""}, + }) +} + +func TestNestedQuotedUnicodeKeyGroup(t *testing.T) { + testFlow(t, `[ j . "ʞ" . l ]`, []token{ + {Position{1, 1}, tokenLeftBracket, "["}, + {Position{1, 2}, tokenKeyGroup, ` j . "ʞ" . 
l `}, + {Position{1, 15}, tokenRightBracket, "]"}, + {Position{1, 16}, tokenEOF, ""}, + }) +} + +func TestUnclosedKeyGroup(t *testing.T) { + testFlow(t, "[hello world", []token{ + {Position{1, 1}, tokenLeftBracket, "["}, + {Position{1, 2}, tokenError, "unclosed table key"}, + }) +} + +func TestComment(t *testing.T) { + testFlow(t, "# blahblah", []token{ + {Position{1, 11}, tokenEOF, ""}, + }) +} + +func TestKeyGroupComment(t *testing.T) { + testFlow(t, "[hello world] # blahblah", []token{ + {Position{1, 1}, tokenLeftBracket, "["}, + {Position{1, 2}, tokenKeyGroup, "hello world"}, + {Position{1, 13}, tokenRightBracket, "]"}, + {Position{1, 25}, tokenEOF, ""}, + }) +} + +func TestMultipleKeyGroupsComment(t *testing.T) { + testFlow(t, "[hello world] # blahblah\n[test]", []token{ + {Position{1, 1}, tokenLeftBracket, "["}, + {Position{1, 2}, tokenKeyGroup, "hello world"}, + {Position{1, 13}, tokenRightBracket, "]"}, + {Position{2, 1}, tokenLeftBracket, "["}, + {Position{2, 2}, tokenKeyGroup, "test"}, + {Position{2, 6}, tokenRightBracket, "]"}, + {Position{2, 7}, tokenEOF, ""}, + }) +} + +func TestSimpleWindowsCRLF(t *testing.T) { + testFlow(t, "a=4\r\nb=2", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 2}, tokenEqual, "="}, + {Position{1, 3}, tokenInteger, "4"}, + {Position{2, 1}, tokenKey, "b"}, + {Position{2, 2}, tokenEqual, "="}, + {Position{2, 3}, tokenInteger, "2"}, + {Position{2, 4}, tokenEOF, ""}, + }) +} + +func TestBasicKey(t *testing.T) { + testFlow(t, "hello", []token{ + {Position{1, 1}, tokenKey, "hello"}, + {Position{1, 6}, tokenEOF, ""}, + }) +} + +func TestBasicKeyWithUnderscore(t *testing.T) { + testFlow(t, "hello_hello", []token{ + {Position{1, 1}, tokenKey, "hello_hello"}, + {Position{1, 12}, tokenEOF, ""}, + }) +} + +func TestBasicKeyWithDash(t *testing.T) { + testFlow(t, "hello-world", []token{ + {Position{1, 1}, tokenKey, "hello-world"}, + {Position{1, 12}, tokenEOF, ""}, + }) +} + +func TestBasicKeyWithUppercaseMix(t *testing.T) { + testFlow(t, "helloHELLOHello", []token{ + {Position{1, 1}, tokenKey, "helloHELLOHello"}, + {Position{1, 16}, tokenEOF, ""}, + }) +} + +func TestBasicKeyWithInternationalCharacters(t *testing.T) { + testFlow(t, "héllÖ", []token{ + {Position{1, 1}, tokenKey, "héllÖ"}, + {Position{1, 6}, tokenEOF, ""}, + }) +} + +func TestBasicKeyAndEqual(t *testing.T) { + testFlow(t, "hello =", []token{ + {Position{1, 1}, tokenKey, "hello"}, + {Position{1, 7}, tokenEqual, "="}, + {Position{1, 8}, tokenEOF, ""}, + }) +} + +func TestKeyWithSharpAndEqual(t *testing.T) { + testFlow(t, "key#name = 5", []token{ + {Position{1, 1}, tokenError, "keys cannot contain # character"}, + }) +} + +func TestKeyWithSymbolsAndEqual(t *testing.T) { + testFlow(t, "~!@$^&*()_+-`1234567890[]\\|/?><.,;:' = 5", []token{ + {Position{1, 1}, tokenError, "keys cannot contain ~ character"}, + }) +} + +func TestKeyEqualStringEscape(t *testing.T) { + testFlow(t, `foo = "hello\""`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "hello\""}, + {Position{1, 16}, tokenEOF, ""}, + }) +} + +func TestKeyEqualStringUnfinished(t *testing.T) { + testFlow(t, `foo = "bar`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenError, "unclosed string"}, + }) +} + +func TestKeyEqualString(t *testing.T) { + testFlow(t, `foo = "bar"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "bar"}, + {Position{1, 12}, 
tokenEOF, ""}, + }) +} + +func TestKeyEqualTrue(t *testing.T) { + testFlow(t, "foo = true", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenTrue, "true"}, + {Position{1, 11}, tokenEOF, ""}, + }) +} + +func TestKeyEqualFalse(t *testing.T) { + testFlow(t, "foo = false", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenFalse, "false"}, + {Position{1, 12}, tokenEOF, ""}, + }) +} + +func TestArrayNestedString(t *testing.T) { + testFlow(t, `a = [ ["hello", "world"] ]`, []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenLeftBracket, "["}, + {Position{1, 7}, tokenLeftBracket, "["}, + {Position{1, 9}, tokenString, "hello"}, + {Position{1, 15}, tokenComma, ","}, + {Position{1, 18}, tokenString, "world"}, + {Position{1, 24}, tokenRightBracket, "]"}, + {Position{1, 26}, tokenRightBracket, "]"}, + {Position{1, 27}, tokenEOF, ""}, + }) +} + +func TestArrayNestedInts(t *testing.T) { + testFlow(t, "a = [ [42, 21], [10] ]", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenLeftBracket, "["}, + {Position{1, 7}, tokenLeftBracket, "["}, + {Position{1, 8}, tokenInteger, "42"}, + {Position{1, 10}, tokenComma, ","}, + {Position{1, 12}, tokenInteger, "21"}, + {Position{1, 14}, tokenRightBracket, "]"}, + {Position{1, 15}, tokenComma, ","}, + {Position{1, 17}, tokenLeftBracket, "["}, + {Position{1, 18}, tokenInteger, "10"}, + {Position{1, 20}, tokenRightBracket, "]"}, + {Position{1, 22}, tokenRightBracket, "]"}, + {Position{1, 23}, tokenEOF, ""}, + }) +} + +func TestArrayInts(t *testing.T) { + testFlow(t, "a = [ 42, 21, 10, ]", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenLeftBracket, "["}, + {Position{1, 7}, tokenInteger, "42"}, + {Position{1, 9}, tokenComma, ","}, + {Position{1, 11}, tokenInteger, "21"}, + {Position{1, 13}, tokenComma, ","}, + {Position{1, 15}, tokenInteger, "10"}, + {Position{1, 17}, tokenComma, ","}, + {Position{1, 19}, tokenRightBracket, "]"}, + {Position{1, 20}, tokenEOF, ""}, + }) +} + +func TestMultilineArrayComments(t *testing.T) { + testFlow(t, "a = [1, # wow\n2, # such items\n3, # so array\n]", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenLeftBracket, "["}, + {Position{1, 6}, tokenInteger, "1"}, + {Position{1, 7}, tokenComma, ","}, + {Position{2, 1}, tokenInteger, "2"}, + {Position{2, 2}, tokenComma, ","}, + {Position{3, 1}, tokenInteger, "3"}, + {Position{3, 2}, tokenComma, ","}, + {Position{4, 1}, tokenRightBracket, "]"}, + {Position{4, 2}, tokenEOF, ""}, + }) +} + +func TestNestedArraysComment(t *testing.T) { + toml := ` +someArray = [ +# does not work +["entry1"] +]` + testFlow(t, toml, []token{ + {Position{2, 1}, tokenKey, "someArray"}, + {Position{2, 11}, tokenEqual, "="}, + {Position{2, 13}, tokenLeftBracket, "["}, + {Position{4, 1}, tokenLeftBracket, "["}, + {Position{4, 3}, tokenString, "entry1"}, + {Position{4, 10}, tokenRightBracket, "]"}, + {Position{5, 1}, tokenRightBracket, "]"}, + {Position{5, 2}, tokenEOF, ""}, + }) +} + +func TestKeyEqualArrayBools(t *testing.T) { + testFlow(t, "foo = [true, false, true]", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenLeftBracket, "["}, + {Position{1, 8}, tokenTrue, "true"}, + {Position{1, 12}, tokenComma, ","}, + {Position{1, 14}, 
tokenFalse, "false"}, + {Position{1, 19}, tokenComma, ","}, + {Position{1, 21}, tokenTrue, "true"}, + {Position{1, 25}, tokenRightBracket, "]"}, + {Position{1, 26}, tokenEOF, ""}, + }) +} + +func TestKeyEqualArrayBoolsWithComments(t *testing.T) { + testFlow(t, "foo = [true, false, true] # YEAH", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenLeftBracket, "["}, + {Position{1, 8}, tokenTrue, "true"}, + {Position{1, 12}, tokenComma, ","}, + {Position{1, 14}, tokenFalse, "false"}, + {Position{1, 19}, tokenComma, ","}, + {Position{1, 21}, tokenTrue, "true"}, + {Position{1, 25}, tokenRightBracket, "]"}, + {Position{1, 33}, tokenEOF, ""}, + }) +} + +func TestDateRegexp(t *testing.T) { + if dateRegexp.FindString("1979-05-27T07:32:00Z") == "" { + t.Error("basic lexing") + } + if dateRegexp.FindString("1979-05-27T00:32:00-07:00") == "" { + t.Error("offset lexing") + } + if dateRegexp.FindString("1979-05-27T00:32:00.999999-07:00") == "" { + t.Error("nano precision lexing") + } +} + +func TestKeyEqualDate(t *testing.T) { + testFlow(t, "foo = 1979-05-27T07:32:00Z", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenDate, "1979-05-27T07:32:00Z"}, + {Position{1, 27}, tokenEOF, ""}, + }) + testFlow(t, "foo = 1979-05-27T00:32:00-07:00", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenDate, "1979-05-27T00:32:00-07:00"}, + {Position{1, 32}, tokenEOF, ""}, + }) + testFlow(t, "foo = 1979-05-27T00:32:00.999999-07:00", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenDate, "1979-05-27T00:32:00.999999-07:00"}, + {Position{1, 39}, tokenEOF, ""}, + }) +} + +func TestFloatEndingWithDot(t *testing.T) { + testFlow(t, "foo = 42.", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenError, "float cannot end with a dot"}, + }) +} + +func TestFloatWithTwoDots(t *testing.T) { + testFlow(t, "foo = 4.2.", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenError, "cannot have two dots in one float"}, + }) +} + +func TestFloatWithExponent1(t *testing.T) { + testFlow(t, "a = 5e+22", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenFloat, "5e+22"}, + {Position{1, 10}, tokenEOF, ""}, + }) +} + +func TestFloatWithExponent2(t *testing.T) { + testFlow(t, "a = 5E+22", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenFloat, "5E+22"}, + {Position{1, 10}, tokenEOF, ""}, + }) +} + +func TestFloatWithExponent3(t *testing.T) { + testFlow(t, "a = -5e+22", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenFloat, "-5e+22"}, + {Position{1, 11}, tokenEOF, ""}, + }) +} + +func TestFloatWithExponent4(t *testing.T) { + testFlow(t, "a = -5e-22", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenFloat, "-5e-22"}, + {Position{1, 11}, tokenEOF, ""}, + }) +} + +func TestFloatWithExponent5(t *testing.T) { + testFlow(t, "a = 6.626e-34", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenFloat, "6.626e-34"}, + {Position{1, 14}, tokenEOF, ""}, + }) +} + +func TestInvalidEsquapeSequence(t *testing.T) { + testFlow(t, `foo = "\x"`, []token{ + 
{Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenError, "invalid escape sequence: \\x"}, + }) +} + +func TestNestedArrays(t *testing.T) { + testFlow(t, "foo = [[[]]]", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenLeftBracket, "["}, + {Position{1, 8}, tokenLeftBracket, "["}, + {Position{1, 9}, tokenLeftBracket, "["}, + {Position{1, 10}, tokenRightBracket, "]"}, + {Position{1, 11}, tokenRightBracket, "]"}, + {Position{1, 12}, tokenRightBracket, "]"}, + {Position{1, 13}, tokenEOF, ""}, + }) +} + +func TestKeyEqualNumber(t *testing.T) { + testFlow(t, "foo = 42", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenInteger, "42"}, + {Position{1, 9}, tokenEOF, ""}, + }) + + testFlow(t, "foo = +42", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenInteger, "+42"}, + {Position{1, 10}, tokenEOF, ""}, + }) + + testFlow(t, "foo = -42", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenInteger, "-42"}, + {Position{1, 10}, tokenEOF, ""}, + }) + + testFlow(t, "foo = 4.2", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenFloat, "4.2"}, + {Position{1, 10}, tokenEOF, ""}, + }) + + testFlow(t, "foo = +4.2", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenFloat, "+4.2"}, + {Position{1, 11}, tokenEOF, ""}, + }) + + testFlow(t, "foo = -4.2", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenFloat, "-4.2"}, + {Position{1, 11}, tokenEOF, ""}, + }) + + testFlow(t, "foo = 1_000", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenInteger, "1_000"}, + {Position{1, 12}, tokenEOF, ""}, + }) + + testFlow(t, "foo = 5_349_221", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenInteger, "5_349_221"}, + {Position{1, 16}, tokenEOF, ""}, + }) + + testFlow(t, "foo = 1_2_3_4_5", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenInteger, "1_2_3_4_5"}, + {Position{1, 16}, tokenEOF, ""}, + }) + + testFlow(t, "flt8 = 9_224_617.445_991_228_313", []token{ + {Position{1, 1}, tokenKey, "flt8"}, + {Position{1, 6}, tokenEqual, "="}, + {Position{1, 8}, tokenFloat, "9_224_617.445_991_228_313"}, + {Position{1, 33}, tokenEOF, ""}, + }) + + testFlow(t, "foo = +", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenError, "no digit in that number"}, + }) +} + +func TestMultiline(t *testing.T) { + testFlow(t, "foo = 42\nbar=21", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 7}, tokenInteger, "42"}, + {Position{2, 1}, tokenKey, "bar"}, + {Position{2, 4}, tokenEqual, "="}, + {Position{2, 5}, tokenInteger, "21"}, + {Position{2, 7}, tokenEOF, ""}, + }) +} + +func TestKeyEqualStringUnicodeEscape(t *testing.T) { + testFlow(t, `foo = "hello \u2665"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "hello ♥"}, + {Position{1, 21}, tokenEOF, ""}, + }) + testFlow(t, `foo = "hello \U000003B4"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, 
"="}, + {Position{1, 8}, tokenString, "hello δ"}, + {Position{1, 25}, tokenEOF, ""}, + }) + testFlow(t, `foo = "\uabcd"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "\uabcd"}, + {Position{1, 15}, tokenEOF, ""}, + }) + testFlow(t, `foo = "\uABCD"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "\uABCD"}, + {Position{1, 15}, tokenEOF, ""}, + }) + testFlow(t, `foo = "\U000bcdef"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "\U000bcdef"}, + {Position{1, 19}, tokenEOF, ""}, + }) + testFlow(t, `foo = "\U000BCDEF"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "\U000BCDEF"}, + {Position{1, 19}, tokenEOF, ""}, + }) + testFlow(t, `foo = "\u2"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenError, "unfinished unicode escape"}, + }) + testFlow(t, `foo = "\U2"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenError, "unfinished unicode escape"}, + }) +} + +func TestKeyEqualStringNoEscape(t *testing.T) { + testFlow(t, "foo = \"hello \u0002\"", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenError, "unescaped control character U+0002"}, + }) + testFlow(t, "foo = \"hello \u001F\"", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenError, "unescaped control character U+001F"}, + }) +} + +func TestLiteralString(t *testing.T) { + testFlow(t, `foo = 'C:\Users\nodejs\templates'`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, `C:\Users\nodejs\templates`}, + {Position{1, 34}, tokenEOF, ""}, + }) + testFlow(t, `foo = '\\ServerX\admin$\system32\'`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, `\\ServerX\admin$\system32\`}, + {Position{1, 35}, tokenEOF, ""}, + }) + testFlow(t, `foo = 'Tom "Dubs" Preston-Werner'`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, `Tom "Dubs" Preston-Werner`}, + {Position{1, 34}, tokenEOF, ""}, + }) + testFlow(t, `foo = '<\i\c*\s*>'`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, `<\i\c*\s*>`}, + {Position{1, 19}, tokenEOF, ""}, + }) + testFlow(t, `foo = 'C:\Users\nodejs\unfinis`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenError, "unclosed string"}, + }) +} + +func TestMultilineLiteralString(t *testing.T) { + testFlow(t, `foo = '''hello 'literal' world'''`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 10}, tokenString, `hello 'literal' world`}, + {Position{1, 34}, tokenEOF, ""}, + }) + + testFlow(t, "foo = '''\nhello\n'literal'\nworld'''", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{2, 1}, tokenString, "hello\n'literal'\nworld"}, + {Position{4, 9}, tokenEOF, ""}, + }) + testFlow(t, "foo = '''\r\nhello\r\n'literal'\r\nworld'''", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{2, 1}, tokenString, 
"hello\r\n'literal'\r\nworld"}, + {Position{4, 9}, tokenEOF, ""}, + }) +} + +func TestMultilineString(t *testing.T) { + testFlow(t, `foo = """hello "literal" world"""`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 10}, tokenString, `hello "literal" world`}, + {Position{1, 34}, tokenEOF, ""}, + }) + + testFlow(t, "foo = \"\"\"\r\nhello\\\r\n\"literal\"\\\nworld\"\"\"", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{2, 1}, tokenString, "hello\"literal\"world"}, + {Position{4, 9}, tokenEOF, ""}, + }) + + testFlow(t, "foo = \"\"\"\\\n \\\n \\\n hello\\\nmultiline\\\nworld\"\"\"", []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 10}, tokenString, "hellomultilineworld"}, + {Position{6, 9}, tokenEOF, ""}, + }) + + testFlow(t, "key2 = \"\"\"\nThe quick brown \\\n\n\n fox jumps over \\\n the lazy dog.\"\"\"", []token{ + {Position{1, 1}, tokenKey, "key2"}, + {Position{1, 6}, tokenEqual, "="}, + {Position{2, 1}, tokenString, "The quick brown fox jumps over the lazy dog."}, + {Position{6, 21}, tokenEOF, ""}, + }) + + testFlow(t, "key2 = \"\"\"\\\n The quick brown \\\n fox jumps over \\\n the lazy dog.\\\n \"\"\"", []token{ + {Position{1, 1}, tokenKey, "key2"}, + {Position{1, 6}, tokenEqual, "="}, + {Position{1, 11}, tokenString, "The quick brown fox jumps over the lazy dog."}, + {Position{5, 11}, tokenEOF, ""}, + }) + + testFlow(t, `key2 = "Roses are red\nViolets are blue"`, []token{ + {Position{1, 1}, tokenKey, "key2"}, + {Position{1, 6}, tokenEqual, "="}, + {Position{1, 9}, tokenString, "Roses are red\nViolets are blue"}, + {Position{1, 41}, tokenEOF, ""}, + }) + + testFlow(t, "key2 = \"\"\"\nRoses are red\nViolets are blue\"\"\"", []token{ + {Position{1, 1}, tokenKey, "key2"}, + {Position{1, 6}, tokenEqual, "="}, + {Position{2, 1}, tokenString, "Roses are red\nViolets are blue"}, + {Position{3, 20}, tokenEOF, ""}, + }) +} + +func TestUnicodeString(t *testing.T) { + testFlow(t, `foo = "hello ♥ world"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "hello ♥ world"}, + {Position{1, 22}, tokenEOF, ""}, + }) +} +func TestEscapeInString(t *testing.T) { + testFlow(t, `foo = "\b\f\/"`, []token{ + {Position{1, 1}, tokenKey, "foo"}, + {Position{1, 5}, tokenEqual, "="}, + {Position{1, 8}, tokenString, "\b\f/"}, + {Position{1, 15}, tokenEOF, ""}, + }) +} + +func TestKeyGroupArray(t *testing.T) { + testFlow(t, "[[foo]]", []token{ + {Position{1, 1}, tokenDoubleLeftBracket, "[["}, + {Position{1, 3}, tokenKeyGroupArray, "foo"}, + {Position{1, 6}, tokenDoubleRightBracket, "]]"}, + {Position{1, 8}, tokenEOF, ""}, + }) +} + +func TestQuotedKey(t *testing.T) { + testFlow(t, "\"a b\" = 42", []token{ + {Position{1, 1}, tokenKey, "a b"}, + {Position{1, 7}, tokenEqual, "="}, + {Position{1, 9}, tokenInteger, "42"}, + {Position{1, 11}, tokenEOF, ""}, + }) +} + +func TestKeyNewline(t *testing.T) { + testFlow(t, "a\n= 4", []token{ + {Position{1, 1}, tokenError, "keys cannot contain new lines"}, + }) +} + +func TestInvalidFloat(t *testing.T) { + testFlow(t, "a=7e1_", []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 2}, tokenEqual, "="}, + {Position{1, 3}, tokenFloat, "7e1_"}, + {Position{1, 7}, tokenEOF, ""}, + }) +} + +func TestLexUnknownRvalue(t *testing.T) { + testFlow(t, `a = !b`, []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenError, "no 
value can start with !"}, + }) + + testFlow(t, `a = \b`, []token{ + {Position{1, 1}, tokenKey, "a"}, + {Position{1, 3}, tokenEqual, "="}, + {Position{1, 5}, tokenError, `no value can start with \`}, + }) +} + +func BenchmarkLexer(b *testing.B) { + sample := `title = "Hugo: A Fast and Flexible Website Generator" +baseurl = "http://gohugo.io/" +MetaDataFormat = "yaml" +pluralizeListTitles = false + +[params] + description = "Documentation of Hugo, a fast and flexible static site generator built with love by spf13, bep and friends in Go" + author = "Steve Francia (spf13) and friends" + release = "0.22-DEV" + +[[menu.main]] + name = "Download Hugo" + pre = "" + url = "https://github.com/spf13/hugo/releases" + weight = -200 +` + b.ResetTimer() + for i := 0; i < b.N; i++ { + lexToml([]byte(sample)) + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go new file mode 100644 index 000000000..b5a241505 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -0,0 +1,600 @@ +package toml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +type tomlOpts struct { + name string + comment string + commented bool + include bool + omitempty bool +} + +type encOpts struct { + quoteMapKeys bool + arraysOneElementPerLine bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + +// Check if the given marshall type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return mtype == timeType || isCustomMarshaler(mtype) + default: + return false + } +} + +// Check if the given marshall type maps to a Tree slice +func isTreeSlice(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Slice: + return !isOtherSlice(mtype) + default: + return false + } +} + +// Check if the given marshall type maps to a non-Tree slice +func isOtherSlice(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSlice(mtype.Elem()) + case reflect.Slice: + return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshall type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + +// Marshaler is the interface implemented by types that +// can marshal themselves into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +/* +Marshal returns the TOML encoding of v. Behavior is similar to the Go json +encoder, except that there is no concept of a Marshaler interface or MarshalTOML +function for sub-structs, and currently only definite types can be marshaled +(i.e. no `interface{}`). 
+ +The following struct annotations are supported: + + toml:"Field" Overrides the field's name to output. + omitempty When set, empty values and groups are not emitted. + comment:"comment" Emits a # comment on the same line. This supports new lines. + commented:"true" Emits the value as commented. + +Note that pointers are automatically assigned the "omitempty" option, as TOML +explicitly does not handle null values (saying instead the label should be +dropped). + +Tree structural types and corresponding marshal types: + + *Tree (*)struct, (*)map[string]interface{} + []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} + []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) + interface{} (*)primitive + +Tree primitive types and corresponding marshal types: + + uint64 uint, uint8-uint64, pointers to same + int64 int, int8-int64, pointers to same + float64 float32, float64, pointers to same + string string, pointers to same + bool bool, pointers to same + time.Time time.Time{}, pointers to same +*/ +func Marshal(v interface{}) ([]byte, error) { + return NewEncoder(nil).marshal(v) +} + +// Encoder writes TOML values to an output stream. +type Encoder struct { + w io.Writer + encOpts +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + encOpts: encOptsDefaults, + } +} + +// Encode writes the TOML encoding of v to the stream. +// +// See the documentation for Marshal for details. +func (e *Encoder) Encode(v interface{}) error { + b, err := e.marshal(v) + if err != nil { + return err + } + if _, err := e.w.Write(b); err != nil { + return err + } + return nil +} + +// QuoteMapKeys sets up the encoder to encode +// maps with string-typed keys using quoted TOML keys. +// +// This relieves the character limitations on map keys. +func (e *Encoder) QuoteMapKeys(v bool) *Encoder { + e.quoteMapKeys = v + return e +} + +// ArraysWithOneElementPerLine sets up the encoder to encode arrays +// with more than one element on multiple lines instead of one.
+// +// For example: +// +// A = [1,2,3] +// +// Becomes +// +// A = [ +// 1, +// 2, +// 3 +// ] +func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { + e.arraysOneElementPerLine = v + return e +} + +func (e *Encoder) marshal(v interface{}) ([]byte, error) { + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Struct { + return []byte{}, errors.New("Only a struct can be marshaled to TOML") + } + sval := reflect.ValueOf(v) + if isCustomMarshaler(mtype) { + return callCustomMarshaler(sval) + } + t, err := e.valueToTree(mtype, sval) + if err != nil { + return []byte{}, err + } + + var buf bytes.Buffer + _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) + + return buf.Bytes(), err +} + +// Convert given marshal struct or map value to toml tree +func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToTree(mtype.Elem(), mval.Elem()) + } + tval := newTree() + switch mtype.Kind() { + case reflect.Struct: + for i := 0; i < mtype.NumField(); i++ { + mtypef, mvalf := mtype.Field(i), mval.Field(i) + opts := tomlOptions(mtypef) + if opts.include && (!opts.omitempty || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + tval.SetWithComment(opts.name, opts.comment, opts.commented, val) + } + } + case reflect.Map: + for _, key := range mval.MapKeys() { + mvalf := mval.MapIndex(key) + val, err := e.valueToToml(mtype.Elem(), mvalf) + if err != nil { + return nil, err + } + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, val) + } else { + tval.Set(key.String(), val) + } + } + } + return tval, nil +} + +// Convert given marshal slice to slice of Toml trees +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { + tval := make([]*Tree, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal slice to slice of toml values +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + tval := make([]interface{}, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal value to toml value +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTree(mtype): + return e.valueToTree(mtype, mval) + case isTreeSlice(mtype): + return e.valueToTreeSlice(mtype, mval) + case isOtherSlice(mtype): + return e.valueToOtherSlice(mtype, mval) + default: + switch mtype.Kind() { + case reflect.Bool: + return mval.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return mval.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return mval.Uint(), nil + case reflect.Float32, reflect.Float64: + return mval.Float(), nil + case reflect.String: + return mval.String(), nil + case reflect.Struct: + return 
mval.Interface().(time.Time), nil + default: + return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) + } + } +} + +// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v. +// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for +// sub-structs, and only definite types can be unmarshaled. +func (t *Tree) Unmarshal(v interface{}) error { + d := Decoder{tval: t} + return d.unmarshal(v) +} + +// Marshal returns the TOML encoding of Tree. +// See Marshal() documentation for types mapping table. +func (t *Tree) Marshal() ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(t) + return buf.Bytes(), err +} + +// Unmarshal parses the TOML-encoded data and stores the result in the value +// pointed to by v. Behavior is similar to the Go json encoder, except that there +// is no concept of an Unmarshaler interface or UnmarshalTOML function for +// sub-structs, and currently only definite types can be unmarshaled to (i.e. no +// `interface{}`). +// +// The following struct annotations are supported: +// +// toml:"Field" Overrides the field's name to map to. +// +// See Marshal() documentation for types mapping table. +func Unmarshal(data []byte, v interface{}) error { + t, err := LoadReader(bytes.NewReader(data)) + if err != nil { + return err + } + return t.Unmarshal(v) +} + +// Decoder reads and decodes TOML values from an input stream. +type Decoder struct { + r io.Reader + tval *Tree + encOpts +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + encOpts: encOptsDefaults, + } +} + +// Decode reads a TOML-encoded value from its input +// and unmarshals it into the value pointed at by v. +// +// See the documentation for Marshal for details.
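+// +// A minimal usage sketch (the config struct here is illustrative): +// +// var config struct { +// Host string `toml:"host"` +// } +// err := NewDecoder(strings.NewReader(`host = "example.org"`)).Decode(&config)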
+func (d *Decoder) Decode(v interface{}) error { + var err error + d.tval, err = LoadReader(d.r) + if err != nil { + return err + } + return d.unmarshal(v) +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { + return errors.New("Only a pointer to struct can be unmarshaled from TOML") + } + + sval, err := d.valueFromTree(mtype.Elem(), d.tval) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval) + } + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + mval = reflect.New(mtype).Elem() + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + opts := tomlOptions(mtypef) + if opts.include { + baseKey := opts.name + keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} + for _, key := range keysToTry { + exists := tval.Has(key) + if !exists { + continue + } + val := tval.Get(key) + mvalf, err := d.valueFromToml(mtypef.Type, val) + if err != nil { + return mval, formatError(err, tval.GetPosition(key)) + } + mval.Field(i).Set(mvalf) + break + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val) + if err != nil { + return mval, formatError(err, tval.GetPosition(key)) + } + mval.SetMapIndex(reflect.ValueOf(key), mvalf) + } + } + return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + for i := 0; i < len(tval); i++ { + val, err := d.valueFromTree(mtype.Elem(), tval[i]) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i]) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval) + } + + switch tval.(type) { + case *Tree: + if isTree(mtype) { + return d.valueFromTree(mtype, tval.(*Tree)) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSlice(mtype) { + return d.valueFromTreeSlice(mtype, tval.([]*Tree)) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + if isOtherSlice(mtype) { + return d.valueFromOtherSlice(mtype, tval.([]interface{})) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + // if this passes for when mtype is 
reflect.Struct, tval is a time.Time + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if val.Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val, err := d.valueFromToml(mtype.Elem(), tval) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func tomlOptions(vf reflect.StructField) tomlOpts { + tag := vf.Tag.Get("toml") + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get("comment"); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) + result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false} + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Map: + fallthrough + case reflect.Array: + fallthrough + case reflect.Slice: + return val.Len() == 0 + default: + return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos 
Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.go b/vendor/github.com/pelletier/go-toml/marshal_test.go new file mode 100644 index 000000000..291a80d2a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_test.go @@ -0,0 +1,806 @@ +package toml + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "reflect" + "strings" + "testing" + "time" +) + +type basicMarshalTestStruct struct { + String string `toml:"string"` + StringList []string `toml:"strlist"` + Sub basicMarshalTestSubStruct `toml:"subdoc"` + SubList []basicMarshalTestSubStruct `toml:"sublist"` +} + +type basicMarshalTestSubStruct struct { + String2 string +} + +var basicTestData = basicMarshalTestStruct{ + String: "Hello", + StringList: []string{"Howdy", "Hey There"}, + Sub: basicMarshalTestSubStruct{"One"}, + SubList: []basicMarshalTestSubStruct{{"Two"}, {"Three"}}, +} + +var basicTestToml = []byte(`string = "Hello" +strlist = ["Howdy","Hey There"] + +[subdoc] + String2 = "One" + +[[sublist]] + String2 = "Two" + +[[sublist]] + String2 = "Three" +`) + +func TestBasicMarshal(t *testing.T) { + result, err := Marshal(basicTestData) + if err != nil { + t.Fatal(err) + } + expected := basicTestToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestBasicUnmarshal(t *testing.T) { + result := basicMarshalTestStruct{} + err := Unmarshal(basicTestToml, &result) + expected := basicTestData + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("Bad unmarshal: expected %v, got %v", expected, result) + } +} + +type testDoc struct { + Title string `toml:"title"` + Basics testDocBasics `toml:"basic"` + BasicLists testDocBasicLists `toml:"basic_lists"` + BasicMap map[string]string `toml:"basic_map"` + Subdocs testDocSubs `toml:"subdoc"` + SubDocList []testSubDoc `toml:"subdoclist"` + SubDocPtrs []*testSubDoc `toml:"subdocptrs"` + err int `toml:"shouldntBeHere"` + unexported int `toml:"shouldntBeHere"` + Unexported2 int `toml:"-"` +} + +type testDocBasics struct { + Bool bool `toml:"bool"` + Date time.Time `toml:"date"` + Float float32 `toml:"float"` + Int int `toml:"int"` + Uint uint `toml:"uint"` + String *string `toml:"string"` + unexported int `toml:"shouldntBeHere"` +} + +type testDocBasicLists struct { + Bools []bool `toml:"bools"` + Dates []time.Time `toml:"dates"` + Floats []*float32 `toml:"floats"` + Ints []int `toml:"ints"` + Strings []string `toml:"strings"` + UInts []uint `toml:"uints"` +} + +type testDocSubs struct { + First testSubDoc `toml:"first"` + Second *testSubDoc `toml:"second"` +} + +type testSubDoc struct { + Name string `toml:"name"` + unexported int `toml:"shouldntBeHere"` +} + +var biteMe = "Bite me" +var float1 float32 = 12.3 +var float2 float32 = 45.6 +var float3 float32 = 78.9 +var subdoc = testSubDoc{"Second", 0} + +var docData = testDoc{ + Title: "TOML Marshal Testing", + unexported: 0, + Unexported2: 0, + Basics: testDocBasics{ + Bool: true, + Date: time.Date(1979, 5, 27, 7, 32, 0, 0, time.UTC), + Float: 123.4, + Int: 5000, + Uint: 5001, + String: &biteMe, + unexported: 0, + }, + BasicLists: testDocBasicLists{ + Bools: []bool{true, false, true}, + Dates: []time.Time{ + time.Date(1979, 5, 27, 7, 32, 0, 0, time.UTC), + time.Date(1980, 5, 27, 7, 32, 0, 0, time.UTC), + }, + Floats: 
[]*float32{&float1, &float2, &float3}, + Ints: []int{8001, 8001, 8002}, + Strings: []string{"One", "Two", "Three"}, + UInts: []uint{5002, 5003}, + }, + BasicMap: map[string]string{ + "one": "one", + "two": "two", + }, + Subdocs: testDocSubs{ + First: testSubDoc{"First", 0}, + Second: &subdoc, + }, + SubDocList: []testSubDoc{ + {"List.First", 0}, + {"List.Second", 0}, + }, + SubDocPtrs: []*testSubDoc{&subdoc}, +} + +func TestDocMarshal(t *testing.T) { + result, err := Marshal(docData) + if err != nil { + t.Fatal(err) + } + expected, _ := ioutil.ReadFile("marshal_test.toml") + if !bytes.Equal(result, expected) { + t.Errorf("Bad marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestDocUnmarshal(t *testing.T) { + result := testDoc{} + tomlData, _ := ioutil.ReadFile("marshal_test.toml") + err := Unmarshal(tomlData, &result) + expected := docData + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + resStr, _ := json.MarshalIndent(result, "", " ") + expStr, _ := json.MarshalIndent(expected, "", " ") + t.Errorf("Bad unmarshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expStr, resStr) + } +} + +func TestDocPartialUnmarshal(t *testing.T) { + result := testDocSubs{} + + tree, _ := LoadFile("marshal_test.toml") + subTree := tree.Get("subdoc").(*Tree) + err := subTree.Unmarshal(&result) + expected := docData.Subdocs + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + resStr, _ := json.MarshalIndent(result, "", " ") + expStr, _ := json.MarshalIndent(expected, "", " ") + t.Errorf("Bad partial unmartial: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expStr, resStr) + } +} + +type tomlTypeCheckTest struct { + name string + item interface{} + typ int //0=primitive, 1=otherslice, 2=treeslice, 3=tree +} + +func TestTypeChecks(t *testing.T) { + tests := []tomlTypeCheckTest{ + {"integer", 2, 0}, + {"time", time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC), 0}, + {"stringlist", []string{"hello", "hi"}, 1}, + {"timelist", []time.Time{time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, 1}, + {"objectlist", []tomlTypeCheckTest{}, 2}, + {"object", tomlTypeCheckTest{}, 3}, + } + + for _, test := range tests { + expected := []bool{false, false, false, false} + expected[test.typ] = true + result := []bool{ + isPrimitive(reflect.TypeOf(test.item)), + isOtherSlice(reflect.TypeOf(test.item)), + isTreeSlice(reflect.TypeOf(test.item)), + isTree(reflect.TypeOf(test.item)), + } + if !reflect.DeepEqual(expected, result) { + t.Errorf("Bad type check on %q: expected %v, got %v", test.name, expected, result) + } + } +} + +type unexportedMarshalTestStruct struct { + String string `toml:"string"` + StringList []string `toml:"strlist"` + Sub basicMarshalTestSubStruct `toml:"subdoc"` + SubList []basicMarshalTestSubStruct `toml:"sublist"` + unexported int `toml:"shouldntBeHere"` + Unexported2 int `toml:"-"` +} + +var unexportedTestData = unexportedMarshalTestStruct{ + String: "Hello", + StringList: []string{"Howdy", "Hey There"}, + Sub: basicMarshalTestSubStruct{"One"}, + SubList: []basicMarshalTestSubStruct{{"Two"}, {"Three"}}, + unexported: 0, + Unexported2: 0, +} + +var unexportedTestToml = []byte(`string = "Hello" +strlist = ["Howdy","Hey There"] +unexported = 1 +shouldntBeHere = 2 + +[subdoc] + String2 = "One" + +[[sublist]] + String2 = "Two" + +[[sublist]] + String2 = "Three" +`) + +func TestUnexportedUnmarshal(t *testing.T) { + result := unexportedMarshalTestStruct{} + err := Unmarshal(unexportedTestToml, &result) 
+ expected := unexportedTestData + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("Bad unexported unmarshal: expected %v, got %v", expected, result) + } +} + +type errStruct struct { + Bool bool `toml:"bool"` + Date time.Time `toml:"date"` + Float float64 `toml:"float"` + Int int16 `toml:"int"` + String *string `toml:"string"` +} + +var errTomls = []string{ + "bool = truly\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"", + "bool = true\ndate = 1979-05-27T07:3200Z\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"", + "bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123a4\nint = 5000\nstring = \"Bite me\"", + "bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = j000\nstring = \"Bite me\"", + "bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = Bite me", + "bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = Bite me", + "bool = 1\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"", + "bool = true\ndate = 1\nfloat = 123.4\nint = 5000\nstring = \"Bite me\"", + "bool = true\ndate = 1979-05-27T07:32:00Z\n\"sorry\"\nint = 5000\nstring = \"Bite me\"", + "bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = \"sorry\"\nstring = \"Bite me\"", + "bool = true\ndate = 1979-05-27T07:32:00Z\nfloat = 123.4\nint = 5000\nstring = 1", +} + +type mapErr struct { + Vals map[string]float64 +} + +type intErr struct { + Int1 int + Int2 int8 + Int3 int16 + Int4 int32 + Int5 int64 + UInt1 uint + UInt2 uint8 + UInt3 uint16 + UInt4 uint32 + UInt5 uint64 + Flt1 float32 + Flt2 float64 +} + +var intErrTomls = []string{ + "Int1 = []\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = []\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = []\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = []\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = []\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = []\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = []\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = []\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = []\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = []\nFlt1 = 1.0\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = []\nFlt2 = 2.0", + "Int1 = 1\nInt2 = 2\nInt3 = 3\nInt4 = 4\nInt5 = 5\nUInt1 = 1\nUInt2 = 2\nUInt3 = 3\nUInt4 = 4\nUInt5 = 5\nFlt1 = 1.0\nFlt2 = []", +} + +func TestErrUnmarshal(t *testing.T) { + for ind, toml := range errTomls { + result := errStruct{} + err := Unmarshal([]byte(toml), &result) + if err == nil { + t.Errorf("Expected err from case %d\n", ind) + } + } + result2 := mapErr{} + err := 
Unmarshal([]byte("[Vals]\nfred=\"1.2\""), &result2) + if err == nil { + t.Errorf("Expected err from map") + } + for ind, toml := range intErrTomls { + result3 := intErr{} + err := Unmarshal([]byte(toml), &result3) + if err == nil { + t.Errorf("Expected int err from case %d\n", ind) + } + } +} + +type emptyMarshalTestStruct struct { + Title string `toml:"title"` + Bool bool `toml:"bool"` + Int int `toml:"int"` + String string `toml:"string"` + StringList []string `toml:"stringlist"` + Ptr *basicMarshalTestStruct `toml:"ptr"` + Map map[string]string `toml:"map"` +} + +var emptyTestData = emptyMarshalTestStruct{ + Title: "Placeholder", + Bool: false, + Int: 0, + String: "", + StringList: []string{}, + Ptr: nil, + Map: map[string]string{}, +} + +var emptyTestToml = []byte(`bool = false +int = 0 +string = "" +stringlist = [] +title = "Placeholder" + +[map] +`) + +type emptyMarshalTestStruct2 struct { + Title string `toml:"title"` + Bool bool `toml:"bool,omitempty"` + Int int `toml:"int, omitempty"` + String string `toml:"string,omitempty "` + StringList []string `toml:"stringlist,omitempty"` + Ptr *basicMarshalTestStruct `toml:"ptr,omitempty"` + Map map[string]string `toml:"map,omitempty"` +} + +var emptyTestData2 = emptyMarshalTestStruct2{ + Title: "Placeholder", + Bool: false, + Int: 0, + String: "", + StringList: []string{}, + Ptr: nil, + Map: map[string]string{}, +} + +var emptyTestToml2 = []byte(`title = "Placeholder" +`) + +func TestEmptyMarshal(t *testing.T) { + result, err := Marshal(emptyTestData) + if err != nil { + t.Fatal(err) + } + expected := emptyTestToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad empty marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestEmptyMarshalOmit(t *testing.T) { + result, err := Marshal(emptyTestData2) + if err != nil { + t.Fatal(err) + } + expected := emptyTestToml2 + if !bytes.Equal(result, expected) { + t.Errorf("Bad empty omit marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestEmptyUnmarshal(t *testing.T) { + result := emptyMarshalTestStruct{} + err := Unmarshal(emptyTestToml, &result) + expected := emptyTestData + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("Bad empty unmarshal: expected %v, got %v", expected, result) + } +} + +func TestEmptyUnmarshalOmit(t *testing.T) { + result := emptyMarshalTestStruct2{} + err := Unmarshal(emptyTestToml, &result) + expected := emptyTestData2 + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("Bad empty omit unmarshal: expected %v, got %v", expected, result) + } +} + +type pointerMarshalTestStruct struct { + Str *string + List *[]string + ListPtr *[]*string + Map *map[string]string + MapPtr *map[string]*string + EmptyStr *string + EmptyList *[]string + EmptyMap *map[string]string + DblPtr *[]*[]*string +} + +var pointerStr = "Hello" +var pointerList = []string{"Hello back"} +var pointerListPtr = []*string{&pointerStr} +var pointerMap = map[string]string{"response": "Goodbye"} +var pointerMapPtr = map[string]*string{"alternate": &pointerStr} +var pointerTestData = pointerMarshalTestStruct{ + Str: &pointerStr, + List: &pointerList, + ListPtr: &pointerListPtr, + Map: &pointerMap, + MapPtr: &pointerMapPtr, + EmptyStr: nil, + EmptyList: nil, + EmptyMap: nil, +} + +var pointerTestToml = []byte(`List = ["Hello back"] +ListPtr = ["Hello"] +Str = "Hello" + +[Map] + response = "Goodbye" + +[MapPtr] + alternate = "Hello" +`) + +func 
TestPointerMarshal(t *testing.T) { + result, err := Marshal(pointerTestData) + if err != nil { + t.Fatal(err) + } + expected := pointerTestToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad pointer marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestPointerUnmarshal(t *testing.T) { + result := pointerMarshalTestStruct{} + err := Unmarshal(pointerTestToml, &result) + expected := pointerTestData + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("Bad pointer unmarshal: expected %v, got %v", expected, result) + } +} + +func TestUnmarshalTypeMismatch(t *testing.T) { + result := pointerMarshalTestStruct{} + err := Unmarshal([]byte("List = 123"), &result) + if !strings.HasPrefix(err.Error(), "(1, 1): Can't convert 123(int64) to []string(slice)") { + t.Errorf("Type mismatch must be reported: got %v", err.Error()) + } +} + +type nestedMarshalTestStruct struct { + String [][]string + //Struct [][]basicMarshalTestSubStruct + StringPtr *[]*[]*string + // StructPtr *[]*[]*basicMarshalTestSubStruct +} + +var str1 = "Three" +var str2 = "Four" +var strPtr = []*string{&str1, &str2} +var strPtr2 = []*[]*string{&strPtr} + +var nestedTestData = nestedMarshalTestStruct{ + String: [][]string{{"Five", "Six"}, {"One", "Two"}}, + StringPtr: &strPtr2, +} + +var nestedTestToml = []byte(`String = [["Five","Six"],["One","Two"]] +StringPtr = [["Three","Four"]] +`) + +func TestNestedMarshal(t *testing.T) { + result, err := Marshal(nestedTestData) + if err != nil { + t.Fatal(err) + } + expected := nestedTestToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad nested marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestNestedUnmarshal(t *testing.T) { + result := nestedMarshalTestStruct{} + err := Unmarshal(nestedTestToml, &result) + expected := nestedTestData + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("Bad nested unmarshal: expected %v, got %v", expected, result) + } +} + +type customMarshalerParent struct { + Self customMarshaler `toml:"me"` + Friends []customMarshaler `toml:"friends"` +} + +type customMarshaler struct { + FirstName string + LastName string +} + +func (c customMarshaler) MarshalTOML() ([]byte, error) { + fullName := fmt.Sprintf("%s %s", c.FirstName, c.LastName) + return []byte(fullName), nil +} + +var customMarshalerData = customMarshaler{FirstName: "Sally", LastName: "Fields"} +var customMarshalerToml = []byte(`Sally Fields`) +var nestedCustomMarshalerData = customMarshalerParent{ + Self: customMarshaler{FirstName: "Maiku", LastName: "Suteda"}, + Friends: []customMarshaler{customMarshalerData}, +} +var nestedCustomMarshalerToml = []byte(`friends = ["Sally Fields"] +me = "Maiku Suteda" +`) + +func TestCustomMarshaler(t *testing.T) { + result, err := Marshal(customMarshalerData) + if err != nil { + t.Fatal(err) + } + expected := customMarshalerToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad custom marshaler: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestNestedCustomMarshaler(t *testing.T) { + result, err := Marshal(nestedCustomMarshalerData) + if err != nil { + t.Fatal(err) + } + expected := nestedCustomMarshalerToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad nested custom marshaler: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +var commentTestToml = []byte(` +# it's a comment on type +[postgres] + # isCommented
= "dvalue" + noComment = "cvalue" + + # A comment on AttrB with a + # break line + password = "bvalue" + + # A comment on AttrA + user = "avalue" + + [[postgres.My]] + + # a comment on my on typeC + My = "Foo" + + [[postgres.My]] + + # a comment on my on typeC + My = "Baar" +`) + +func TestMarshalComment(t *testing.T) { + type TypeC struct { + My string `comment:"a comment on my on typeC"` + } + type TypeB struct { + AttrA string `toml:"user" comment:"A comment on AttrA"` + AttrB string `toml:"password" comment:"A comment on AttrB with a\n break line"` + AttrC string `toml:"noComment"` + AttrD string `toml:"isCommented" commented:"true"` + My []TypeC + } + type TypeA struct { + TypeB TypeB `toml:"postgres" comment:"it's a comment on type"` + } + + ta := []TypeC{{My: "Foo"}, {My: "Baar"}} + config := TypeA{TypeB{AttrA: "avalue", AttrB: "bvalue", AttrC: "cvalue", AttrD: "dvalue", My: ta}} + result, err := Marshal(config) + if err != nil { + t.Fatal(err) + } + expected := commentTestToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +type mapsTestStruct struct { + Simple map[string]string + Paths map[string]string + Other map[string]float64 + X struct { + Y struct { + Z map[string]bool + } + } +} + +var mapsTestData = mapsTestStruct{ + Simple: map[string]string{ + "one plus one": "two", + "next": "three", + }, + Paths: map[string]string{ + "/this/is/a/path": "/this/is/also/a/path", + "/heloo.txt": "/tmp/lololo.txt", + }, + Other: map[string]float64{ + "testing": 3.9999, + }, + X: struct{ Y struct{ Z map[string]bool } }{ + Y: struct{ Z map[string]bool }{ + Z: map[string]bool{ + "is.Nested": true, + }, + }, + }, +} +var mapsTestToml = []byte(` +[Other] + "testing" = 3.9999 + +[Paths] + "/heloo.txt" = "/tmp/lololo.txt" + "/this/is/a/path" = "/this/is/also/a/path" + +[Simple] + "next" = "three" + "one plus one" = "two" + +[X] + + [X.Y] + + [X.Y.Z] + "is.Nested" = true +`) + +func TestEncodeQuotedMapKeys(t *testing.T) { + var buf bytes.Buffer + if err := NewEncoder(&buf).QuoteMapKeys(true).Encode(mapsTestData); err != nil { + t.Fatal(err) + } + result := buf.Bytes() + expected := mapsTestToml + if !bytes.Equal(result, expected) { + t.Errorf("Bad maps marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) + } +} + +func TestDecodeQuotedMapKeys(t *testing.T) { + result := mapsTestStruct{} + err := NewDecoder(bytes.NewBuffer(mapsTestToml)).Decode(&result) + expected := mapsTestData + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("Bad maps unmarshal: expected %v, got %v", expected, result) + } +} + +type structArrayNoTag struct { + A struct { + B []int64 + C []int64 + } +} + +func TestMarshalArray(t *testing.T) { + expected := []byte(` +[A] + B = [1,2,3] + C = [1] +`) + + m := structArrayNoTag{ + A: struct { + B []int64 + C []int64 + }{ + B: []int64{1, 2, 3}, + C: []int64{1}, + }, + } + + b, err := Marshal(m) + + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(b, expected) { + t.Errorf("Bad arrays marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, b) + } +} + +func TestMarshalArrayOnePerLine(t *testing.T) { + expected := []byte(` +[A] + B = [ + 1, + 2, + 3 + ] + C = [1] +`) + + m := structArrayNoTag{ + A: struct { + B []int64 + C []int64 + }{ + B: []int64{1, 2, 3}, + C: []int64{1}, + }, + } + + var buf bytes.Buffer + encoder := NewEncoder(&buf).ArraysWithOneElementPerLine(true) + err := encoder.Encode(m) + + 
if err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + + if !bytes.Equal(b, expected) { + t.Errorf("Bad arrays marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, b) + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml new file mode 100644 index 000000000..1c5f98e7a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -0,0 +1,38 @@ +title = "TOML Marshal Testing" + +[basic] + bool = true + date = 1979-05-27T07:32:00Z + float = 123.4 + int = 5000 + string = "Bite me" + uint = 5001 + +[basic_lists] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + floats = [12.3,45.6,78.9] + ints = [8001,8001,8002] + strings = ["One","Two","Three"] + uints = [5002,5003] + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.first] + name = "First" + + [subdoc.second] + name = "Second" + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" + +[[subdocptrs]] + name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 000000000..2d27599a9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,430 @@ +// TOML Parser. + +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + default: + p.raiseError(tok, "unexpected token") + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != 
nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + + // add a new tree to the end of the table array + newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) + } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVals := []string{key.val} + if len(keyVals) != 1 { + p.raiseError(key, "Invalid key") + } + keyVal := keyVals[0] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var numberUnderscoreInvalidRegexp *regexp.Regexp +var hexNumberUnderscoreInvalidRegexp *regexp.Regexp + +func numberContainsInvalidUnderscore(value string) error { + if numberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in number") + } + return nil +} + +func hexNumberContainsInvalidUnderscore(value string) error { + if hexNumberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in hex number") + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + 
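// a key must be followed by a value; running out of tokens here is a
+ // parse error (exercised by TestMissingValue in parser_test.go)
+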
p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + var err error + var val int64 + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + err = hexNumberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + case 'o': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + case 'b': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + default: + panic("invalid base") // the lexer should catch this first + } + } else { + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal, 10, 64) + } + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenDate: + val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + } + + p.raiseError(tok, "never reached") + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + value := p.parseRvalue() + tree.Set(key.val, value) + case tokenComma: + if previous == nil { + p.raiseError(follow, "inline table cannot start with a comma") + } + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(nil) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if arrayType == nil { + arrayType = reflect.TypeOf(val) + } + if reflect.TypeOf(val) != 
arrayType { + p.raiseError(follow, "mixed types in array") + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } + } + // An array of Trees is actually an array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. + if arrayType == reflect.TypeOf(newTree()) { + tomlArray := make([]*Tree, len(array)) + for i, v := range array { + tomlArray[i] = v.(*Tree) + } + return tomlArray + } + return array +} + +func parseToml(flow []token) *Tree { + result := newTree() + result.position = Position{1, 1} + parser := &tomlParser{ + flowIdx: 0, + flow: flow, + tree: result, + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + } + parser.run() + return result +} + +func init() { + numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`) + hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`) +} diff --git a/vendor/github.com/pelletier/go-toml/parser_test.go b/vendor/github.com/pelletier/go-toml/parser_test.go new file mode 100644 index 000000000..ca29c442e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser_test.go @@ -0,0 +1,899 @@ +package toml + +import ( + "fmt" + "math" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" +) + +func assertSubTree(t *testing.T, path []string, tree *Tree, err error, ref map[string]interface{}) { + if err != nil { + t.Error("Non-nil error:", err.Error()) + return + } + for k, v := range ref { + nextPath := append(path, k) + t.Log("asserting path", nextPath) + // NOTE: directly access key instead of resolve by path + // NOTE: see TestSpecialKV + switch node := tree.GetPath([]string{k}).(type) { + case []*Tree: + t.Log("\tcomparing key", nextPath, "by array iteration") + for idx, item := range node { + assertSubTree(t, nextPath, item, err, v.([]map[string]interface{})[idx]) + } + case *Tree: + t.Log("\tcomparing key", nextPath, "by subtree assertion") + assertSubTree(t, nextPath, node, err, v.(map[string]interface{})) + default: + t.Log("\tcomparing key", nextPath, "by string representation because it's of type", reflect.TypeOf(node)) + if fmt.Sprintf("%v", node) != fmt.Sprintf("%v", v) { + t.Errorf("was expecting %v at %v but got %v", v, k, node) + } + } + } +} + +func assertTree(t *testing.T, tree *Tree, err error, ref map[string]interface{}) { + t.Log("Asserting tree:\n", spew.Sdump(tree)) + assertSubTree(t, []string{}, tree, err, ref) + t.Log("Finished tree assertion.") +} + +func TestCreateSubTree(t *testing.T) { + tree := newTree() + tree.createSubTree([]string{"a", "b", "c"}, Position{}) + tree.Set("a.b.c", 42) + if tree.Get("a.b.c") != 42 { + t.Fail() + } +} + +func TestSimpleKV(t *testing.T) { + tree, err := Load("a = 42") + assertTree(t, tree, err, map[string]interface{}{ + "a": int64(42), + }) + + tree, _ = Load("a = 42\nb = 21") + assertTree(t, tree, err, map[string]interface{}{ + "a": int64(42), + "b": int64(21), + }) +} + +func TestNumberInKey(t *testing.T) { + tree, err := Load("hello2 = 42") + assertTree(t, tree, err, map[string]interface{}{ + "hello2": int64(42), + }) +} + +func TestIncorrectKeyExtraSquareBracket(t *testing.T) { + _, err := Load(`[a]b] +zyx = 42`) + if err ==
nil { + t.Error("Error should have been returned.") + } + if err.Error() != "(1, 4): unexpected token" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestSimpleNumbers(t *testing.T) { + tree, err := Load("a = +42\nb = -21\nc = +4.2\nd = -2.1") + assertTree(t, tree, err, map[string]interface{}{ + "a": int64(42), + "b": int64(-21), + "c": float64(4.2), + "d": float64(-2.1), + }) +} + +func TestSpecialFloats(t *testing.T) { + tree, err := Load(` +normalinf = inf +plusinf = +inf +minusinf = -inf +normalnan = nan +plusnan = +nan +minusnan = -nan +`) + assertTree(t, tree, err, map[string]interface{}{ + "normalinf": math.Inf(1), + "plusinf": math.Inf(1), + "minusinf": math.Inf(-1), + "normalnan": math.NaN(), + "plusnan": math.NaN(), + "minusnan": math.NaN(), + }) +} + +func TestHexIntegers(t *testing.T) { + tree, err := Load(`a = 0xDEADBEEF`) + assertTree(t, tree, err, map[string]interface{}{"a": int64(3735928559)}) + + tree, err = Load(`a = 0xdeadbeef`) + assertTree(t, tree, err, map[string]interface{}{"a": int64(3735928559)}) + + tree, err = Load(`a = 0xdead_beef`) + assertTree(t, tree, err, map[string]interface{}{"a": int64(3735928559)}) + + _, err = Load(`a = 0x_1`) + if err.Error() != "(1, 5): invalid use of _ in hex number" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestOctIntegers(t *testing.T) { + tree, err := Load(`a = 0o01234567`) + assertTree(t, tree, err, map[string]interface{}{"a": int64(342391)}) + + tree, err = Load(`a = 0o755`) + assertTree(t, tree, err, map[string]interface{}{"a": int64(493)}) + + _, err = Load(`a = 0o_1`) + if err.Error() != "(1, 5): invalid use of _ in number" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestBinIntegers(t *testing.T) { + tree, err := Load(`a = 0b11010110`) + assertTree(t, tree, err, map[string]interface{}{"a": int64(214)}) + + _, err = Load(`a = 0b_1`) + if err.Error() != "(1, 5): invalid use of _ in number" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestBadIntegerBase(t *testing.T) { + _, err := Load(`a = 0k1`) + if err.Error() != "(1, 5): unknown number base: k. 
possible options are x (hex) o (octal) b (binary)" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestIntegerNoDigit(t *testing.T) { + _, err := Load(`a = 0b`) + if err.Error() != "(1, 5): number needs at least one digit" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestNumbersWithUnderscores(t *testing.T) { + tree, err := Load("a = 1_000") + assertTree(t, tree, err, map[string]interface{}{ + "a": int64(1000), + }) + + tree, err = Load("a = 5_349_221") + assertTree(t, tree, err, map[string]interface{}{ + "a": int64(5349221), + }) + + tree, err = Load("a = 1_2_3_4_5") + assertTree(t, tree, err, map[string]interface{}{ + "a": int64(12345), + }) + + tree, err = Load("flt8 = 9_224_617.445_991_228_313") + assertTree(t, tree, err, map[string]interface{}{ + "flt8": float64(9224617.445991228313), + }) + + tree, err = Load("flt9 = 1e1_00") + assertTree(t, tree, err, map[string]interface{}{ + "flt9": float64(1e100), + }) +} + +func TestFloatsWithExponents(t *testing.T) { + tree, err := Load("a = 5e+22\nb = 5E+22\nc = -5e+22\nd = -5e-22\ne = 6.626e-34") + assertTree(t, tree, err, map[string]interface{}{ + "a": float64(5e+22), + "b": float64(5E+22), + "c": float64(-5e+22), + "d": float64(-5e-22), + "e": float64(6.626e-34), + }) +} + +func TestSimpleDate(t *testing.T) { + tree, err := Load("a = 1979-05-27T07:32:00Z") + assertTree(t, tree, err, map[string]interface{}{ + "a": time.Date(1979, time.May, 27, 7, 32, 0, 0, time.UTC), + }) +} + +func TestDateOffset(t *testing.T) { + tree, err := Load("a = 1979-05-27T00:32:00-07:00") + assertTree(t, tree, err, map[string]interface{}{ + "a": time.Date(1979, time.May, 27, 0, 32, 0, 0, time.FixedZone("", -7*60*60)), + }) +} + +func TestDateNano(t *testing.T) { + tree, err := Load("a = 1979-05-27T00:32:00.999999999-07:00") + assertTree(t, tree, err, map[string]interface{}{ + "a": time.Date(1979, time.May, 27, 0, 32, 0, 999999999, time.FixedZone("", -7*60*60)), + }) +} + +func TestSimpleString(t *testing.T) { + tree, err := Load("a = \"hello world\"") + assertTree(t, tree, err, map[string]interface{}{ + "a": "hello world", + }) +} + +func TestSpaceKey(t *testing.T) { + tree, err := Load("\"a b\" = \"hello world\"") + assertTree(t, tree, err, map[string]interface{}{ + "a b": "hello world", + }) +} + +func TestDoubleQuotedKey(t *testing.T) { + tree, err := Load(` + "key" = "a" + "\t" = "b" + "\U0001F914" = "c" + "\u2764" = "d" + `) + assertTree(t, tree, err, map[string]interface{}{ + "key": "a", + "\t": "b", + "\U0001F914": "c", + "\u2764": "d", + }) +} + +func TestSingleQuotedKey(t *testing.T) { + tree, err := Load(` + 'key' = "a" + '\t' = "b" + '\U0001F914' = "c" + '\u2764' = "d" + `) + assertTree(t, tree, err, map[string]interface{}{ + `key`: "a", + `\t`: "b", + `\U0001F914`: "c", + `\u2764`: "d", + }) +} + +func TestStringEscapables(t *testing.T) { + tree, err := Load("a = \"a \\n b\"") + assertTree(t, tree, err, map[string]interface{}{ + "a": "a \n b", + }) + + tree, err = Load("a = \"a \\t b\"") + assertTree(t, tree, err, map[string]interface{}{ + "a": "a \t b", + }) + + tree, err = Load("a = \"a \\r b\"") + assertTree(t, tree, err, map[string]interface{}{ + "a": "a \r b", + }) + + tree, err = Load("a = \"a \\\\ b\"") + assertTree(t, tree, err, map[string]interface{}{ + "a": "a \\ b", + }) +} + +func TestEmptyQuotedString(t *testing.T) { + tree, err := Load(`[""] +"" = 1`) + assertTree(t, tree, err, map[string]interface{}{ + "": map[string]interface{}{ + "": int64(1), + }, + }) +} + +func TestBools(t *testing.T) { + tree, err :=
Load("a = true\nb = false") + assertTree(t, tree, err, map[string]interface{}{ + "a": true, + "b": false, + }) +} + +func TestNestedKeys(t *testing.T) { + tree, err := Load("[a.b.c]\nd = 42") + assertTree(t, tree, err, map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": map[string]interface{}{ + "d": int64(42), + }, + }, + }, + }) +} + +func TestNestedQuotedUnicodeKeys(t *testing.T) { + tree, err := Load("[ j . \"ʞ\" . l ]\nd = 42") + assertTree(t, tree, err, map[string]interface{}{ + "j": map[string]interface{}{ + "ʞ": map[string]interface{}{ + "l": map[string]interface{}{ + "d": int64(42), + }, + }, + }, + }) + + tree, err = Load("[ g . h . i ]\nd = 42") + assertTree(t, tree, err, map[string]interface{}{ + "g": map[string]interface{}{ + "h": map[string]interface{}{ + "i": map[string]interface{}{ + "d": int64(42), + }, + }, + }, + }) + + tree, err = Load("[ d.e.f ]\nk = 42") + assertTree(t, tree, err, map[string]interface{}{ + "d": map[string]interface{}{ + "e": map[string]interface{}{ + "f": map[string]interface{}{ + "k": int64(42), + }, + }, + }, + }) +} + +func TestArrayOne(t *testing.T) { + tree, err := Load("a = [1]") + assertTree(t, tree, err, map[string]interface{}{ + "a": []int64{int64(1)}, + }) +} + +func TestArrayZero(t *testing.T) { + tree, err := Load("a = []") + assertTree(t, tree, err, map[string]interface{}{ + "a": []interface{}{}, + }) +} + +func TestArraySimple(t *testing.T) { + tree, err := Load("a = [42, 21, 10]") + assertTree(t, tree, err, map[string]interface{}{ + "a": []int64{int64(42), int64(21), int64(10)}, + }) + + tree, _ = Load("a = [42, 21, 10,]") + assertTree(t, tree, err, map[string]interface{}{ + "a": []int64{int64(42), int64(21), int64(10)}, + }) +} + +func TestArrayMultiline(t *testing.T) { + tree, err := Load("a = [42,\n21, 10,]") + assertTree(t, tree, err, map[string]interface{}{ + "a": []int64{int64(42), int64(21), int64(10)}, + }) +} + +func TestArrayNested(t *testing.T) { + tree, err := Load("a = [[42, 21], [10]]") + assertTree(t, tree, err, map[string]interface{}{ + "a": [][]int64{{int64(42), int64(21)}, {int64(10)}}, + }) +} + +func TestNestedArrayComment(t *testing.T) { + tree, err := Load(` +someArray = [ +# does not work +["entry1"] +]`) + assertTree(t, tree, err, map[string]interface{}{ + "someArray": [][]string{{"entry1"}}, + }) +} + +func TestNestedEmptyArrays(t *testing.T) { + tree, err := Load("a = [[[]]]") + assertTree(t, tree, err, map[string]interface{}{ + "a": [][][]interface{}{{{}}}, + }) +} + +func TestArrayMixedTypes(t *testing.T) { + _, err := Load("a = [42, 16.0]") + if err.Error() != "(1, 10): mixed types in array" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("a = [42, \"hello\"]") + if err.Error() != "(1, 11): mixed types in array" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestArrayNestedStrings(t *testing.T) { + tree, err := Load("data = [ [\"gamma\", \"delta\"], [\"Foo\"] ]") + assertTree(t, tree, err, map[string]interface{}{ + "data": [][]string{{"gamma", "delta"}, {"Foo"}}, + }) +} + +func TestParseUnknownRvalue(t *testing.T) { + _, err := Load("a = !bssss") + if err == nil { + t.Error("Expecting a parse error") + } + + _, err = Load("a = /b") + if err == nil { + t.Error("Expecting a parse error") + } +} + +func TestMissingValue(t *testing.T) { + _, err := Load("a = ") + if err.Error() != "(1, 5): expecting a value" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestUnterminatedArray(t *testing.T) { + _, err := Load("a = [1,") + 
if err.Error() != "(1, 8): unterminated array" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("a = [1") + if err.Error() != "(1, 7): unterminated array" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("a = [1 2") + if err.Error() != "(1, 8): missing comma" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestNewlinesInArrays(t *testing.T) { + tree, err := Load("a = [1,\n2,\n3]") + assertTree(t, tree, err, map[string]interface{}{ + "a": []int64{int64(1), int64(2), int64(3)}, + }) +} + +func TestArrayWithExtraComma(t *testing.T) { + tree, err := Load("a = [1,\n2,\n3,\n]") + assertTree(t, tree, err, map[string]interface{}{ + "a": []int64{int64(1), int64(2), int64(3)}, + }) +} + +func TestArrayWithExtraCommaComment(t *testing.T) { + tree, err := Load("a = [1, # wow\n2, # such items\n3, # so array\n]") + assertTree(t, tree, err, map[string]interface{}{ + "a": []int64{int64(1), int64(2), int64(3)}, + }) +} + +func TestSimpleInlineGroup(t *testing.T) { + tree, err := Load("key = {a = 42}") + assertTree(t, tree, err, map[string]interface{}{ + "key": map[string]interface{}{ + "a": int64(42), + }, + }) +} + +func TestDoubleInlineGroup(t *testing.T) { + tree, err := Load("key = {a = 42, b = \"foo\"}") + assertTree(t, tree, err, map[string]interface{}{ + "key": map[string]interface{}{ + "a": int64(42), + "b": "foo", + }, + }) +} + +func TestExampleInlineGroup(t *testing.T) { + tree, err := Load(`name = { first = "Tom", last = "Preston-Werner" } +point = { x = 1, y = 2 }`) + assertTree(t, tree, err, map[string]interface{}{ + "name": map[string]interface{}{ + "first": "Tom", + "last": "Preston-Werner", + }, + "point": map[string]interface{}{ + "x": int64(1), + "y": int64(2), + }, + }) +} + +func TestExampleInlineGroupInArray(t *testing.T) { + tree, err := Load(`points = [{ x = 1, y = 2 }]`) + assertTree(t, tree, err, map[string]interface{}{ + "points": []map[string]interface{}{ + { + "x": int64(1), + "y": int64(2), + }, + }, + }) +} + +func TestInlineTableUnterminated(t *testing.T) { + _, err := Load("foo = {") + if err.Error() != "(1, 8): unterminated inline table" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestInlineTableCommaExpected(t *testing.T) { + _, err := Load("foo = {hello = 53 test = foo}") + if err.Error() != "(1, 19): comma expected between fields in inline table" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestInlineTableCommaStart(t *testing.T) { + _, err := Load("foo = {, hello = 53}") + if err.Error() != "(1, 8): inline table cannot start with a comma" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestInlineTableDoubleComma(t *testing.T) { + _, err := Load("foo = {hello = 53,, foo = 17}") + if err.Error() != "(1, 19): need field between two commas in inline table" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestDuplicateGroups(t *testing.T) { + _, err := Load("[foo]\na=2\n[foo]b=3") + if err.Error() != "(3, 2): duplicated tables" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestDuplicateKeys(t *testing.T) { + _, err := Load("foo = 2\nfoo = 3") + if err.Error() != "(2, 1): The following key was defined twice: foo" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestEmptyIntermediateTable(t *testing.T) { + _, err := Load("[foo..bar]") + if err.Error() != "(1, 2): invalid table array key: empty table key" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestImplicitDeclarationBefore(t *testing.T) { + tree, err := 
Load("[a.b.c]\nanswer = 42\n[a]\nbetter = 43") + assertTree(t, tree, err, map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": map[string]interface{}{ + "answer": int64(42), + }, + }, + "better": int64(43), + }, + }) +} + +func TestFloatsWithoutLeadingZeros(t *testing.T) { + _, err := Load("a = .42") + if err.Error() != "(1, 5): cannot start float with a dot" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("a = -.42") + if err.Error() != "(1, 5): cannot start float with a dot" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestMissingFile(t *testing.T) { + _, err := LoadFile("foo.toml") + if err.Error() != "open foo.toml: no such file or directory" && + err.Error() != "open foo.toml: The system cannot find the file specified." { + t.Error("Bad error message:", err.Error()) + } +} + +func TestParseFile(t *testing.T) { + tree, err := LoadFile("example.toml") + + assertTree(t, tree, err, map[string]interface{}{ + "title": "TOML Example", + "owner": map[string]interface{}{ + "name": "Tom Preston-Werner", + "organization": "GitHub", + "bio": "GitHub Cofounder & CEO\nLikes tater tots and beer.", + "dob": time.Date(1979, time.May, 27, 7, 32, 0, 0, time.UTC), + }, + "database": map[string]interface{}{ + "server": "192.168.1.1", + "ports": []int64{8001, 8001, 8002}, + "connection_max": 5000, + "enabled": true, + }, + "servers": map[string]interface{}{ + "alpha": map[string]interface{}{ + "ip": "10.0.0.1", + "dc": "eqdc10", + }, + "beta": map[string]interface{}{ + "ip": "10.0.0.2", + "dc": "eqdc10", + }, + }, + "clients": map[string]interface{}{ + "data": []interface{}{ + []string{"gamma", "delta"}, + []int64{1, 2}, + }, + }, + }) +} + +func TestParseFileCRLF(t *testing.T) { + tree, err := LoadFile("example-crlf.toml") + + assertTree(t, tree, err, map[string]interface{}{ + "title": "TOML Example", + "owner": map[string]interface{}{ + "name": "Tom Preston-Werner", + "organization": "GitHub", + "bio": "GitHub Cofounder & CEO\nLikes tater tots and beer.", + "dob": time.Date(1979, time.May, 27, 7, 32, 0, 0, time.UTC), + }, + "database": map[string]interface{}{ + "server": "192.168.1.1", + "ports": []int64{8001, 8001, 8002}, + "connection_max": 5000, + "enabled": true, + }, + "servers": map[string]interface{}{ + "alpha": map[string]interface{}{ + "ip": "10.0.0.1", + "dc": "eqdc10", + }, + "beta": map[string]interface{}{ + "ip": "10.0.0.2", + "dc": "eqdc10", + }, + }, + "clients": map[string]interface{}{ + "data": []interface{}{ + []string{"gamma", "delta"}, + []int64{1, 2}, + }, + }, + }) +} + +func TestParseKeyGroupArray(t *testing.T) { + tree, err := Load("[[foo.bar]] a = 42\n[[foo.bar]] a = 69") + assertTree(t, tree, err, map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": []map[string]interface{}{ + {"a": int64(42)}, + {"a": int64(69)}, + }, + }, + }) +} + +func TestParseKeyGroupArrayUnfinished(t *testing.T) { + _, err := Load("[[foo.bar]\na = 42") + if err.Error() != "(1, 10): was expecting token [[, but got unclosed table array key instead" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("[[foo.[bar]\na = 42") + if err.Error() != "(1, 3): unexpected token table array key cannot contain ']', was expecting a table array key" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestParseKeyGroupArrayQueryExample(t *testing.T) { + tree, err := Load(` + [[book]] + title = "The Stand" + author = "Stephen King" + [[book]] + title = "For Whom the Bell Tolls" + author = "Ernest Hemmingway" 
+ [[book]] + title = "Neuromancer" + author = "William Gibson" + `) + + assertTree(t, tree, err, map[string]interface{}{ + "book": []map[string]interface{}{ + {"title": "The Stand", "author": "Stephen King"}, + {"title": "For Whom the Bell Tolls", "author": "Ernest Hemmingway"}, + {"title": "Neuromancer", "author": "William Gibson"}, + }, + }) +} + +func TestParseKeyGroupArraySpec(t *testing.T) { + tree, err := Load("[[fruit]]\n name=\"apple\"\n [fruit.physical]\n color=\"red\"\n shape=\"round\"\n [[fruit]]\n name=\"banana\"") + assertTree(t, tree, err, map[string]interface{}{ + "fruit": []map[string]interface{}{ + {"name": "apple", "physical": map[string]interface{}{"color": "red", "shape": "round"}}, + {"name": "banana"}, + }, + }) +} + +func TestTomlValueStringRepresentation(t *testing.T) { + for idx, item := range []struct { + Value interface{} + Expect string + }{ + {int64(12345), "12345"}, + {uint64(50), "50"}, + {float64(123.45), "123.45"}, + {true, "true"}, + {"hello world", "\"hello world\""}, + {"\b\t\n\f\r\"\\", "\"\\b\\t\\n\\f\\r\\\"\\\\\""}, + {"\x05", "\"\\u0005\""}, + {time.Date(1979, time.May, 27, 7, 32, 0, 0, time.UTC), + "1979-05-27T07:32:00Z"}, + {[]interface{}{"gamma", "delta"}, + "[\"gamma\",\"delta\"]"}, + {nil, ""}, + } { + result, err := tomlValueStringRepresentation(item.Value, "", false) + if err != nil { + t.Errorf("Test %d - unexpected error: %s", idx, err) + } + if result != item.Expect { + t.Errorf("Test %d - got '%s', expected '%s'", idx, result, item.Expect) + } + } +} + +func TestToStringMapStringString(t *testing.T) { + tree, err := TreeFromMap(map[string]interface{}{"m": map[string]interface{}{"v": "abc"}}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + want := "\n[m]\n v = \"abc\"\n" + got := tree.String() + + if got != want { + t.Errorf("want:\n%q\ngot:\n%q", want, got) + } +} + +func assertPosition(t *testing.T, text string, ref map[string]Position) { + tree, err := Load(text) + if err != nil { + t.Errorf("Error loading document text: `%v`", text) + t.Errorf("Error: %v", err) + } + for path, pos := range ref { + testPos := tree.GetPosition(path) + if testPos.Invalid() { + t.Errorf("Failed to query tree path or path has invalid position: %s", path) + } else if pos != testPos { + t.Errorf("Expected position %v, got %v instead", pos, testPos) + } + } +} + +func TestDocumentPositions(t *testing.T) { + assertPosition(t, + "[foo]\nbar=42\nbaz=69", + map[string]Position{ + "": {1, 1}, + "foo": {1, 1}, + "foo.bar": {2, 1}, + "foo.baz": {3, 1}, + }) +} + +func TestDocumentPositionsWithSpaces(t *testing.T) { + assertPosition(t, + " [foo]\n bar=42\n baz=69", + map[string]Position{ + "": {1, 1}, + "foo": {1, 3}, + "foo.bar": {2, 3}, + "foo.baz": {3, 3}, + }) +} + +func TestDocumentPositionsWithGroupArray(t *testing.T) { + assertPosition(t, + "[[foo]]\nbar=42\nbaz=69", + map[string]Position{ + "": {1, 1}, + "foo": {1, 1}, + "foo.bar": {2, 1}, + "foo.baz": {3, 1}, + }) +} + +func TestNestedTreePosition(t *testing.T) { + assertPosition(t, + "[foo.bar]\na=42\nb=69", + map[string]Position{ + "": {1, 1}, + "foo": {1, 1}, + "foo.bar": {1, 1}, + "foo.bar.a": {2, 1}, + "foo.bar.b": {3, 1}, + }) +} + +func TestInvalidGroupArray(t *testing.T) { + _, err := Load("[table#key]\nanswer = 42") + if err == nil { + t.Error("Should error") + } + + _, err = Load("[foo.[bar]\na = 42") + if err.Error() != "(1, 2): unexpected token table key cannot contain ']', was expecting a table key" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestDoubleEqual(t 
*testing.T) { + _, err := Load("foo= = 2") + if err.Error() != "(1, 6): cannot have multiple equals for the same key" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestGroupArrayReassign(t *testing.T) { + _, err := Load("[hello]\n[[hello]]") + if err.Error() != "(2, 3): key \"hello\" is already assigned and not of type table array" { + t.Error("Bad error message:", err.Error()) + } +} + +func TestInvalidFloatParsing(t *testing.T) { + _, err := Load("a=1e_2") + if err.Error() != "(1, 3): invalid use of _ in number" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("a=1e2_") + if err.Error() != "(1, 3): invalid use of _ in number" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("a=1__2") + if err.Error() != "(1, 3): invalid use of _ in number" { + t.Error("Bad error message:", err.Error()) + } + + _, err = Load("a=_1_2") + if err.Error() != "(1, 3): cannot start number with underscore" { + t.Error("Bad error message:", err.Error()) + } +} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go new file mode 100644 index 000000000..c17bff87b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/position.go @@ -0,0 +1,29 @@ +// Position support for go-toml + +package toml + +import ( + "fmt" +) + +// Position of a document element within a TOML document. +// +// Line and Col are both 1-indexed positions for the element's line number and +// column number, respectively. Values of zero or less will cause Invalid() +// to return true. +type Position struct { + Line int // line within the document + Col int // column within the line +} + +// String representation of the position. +// Displays 1-indexed line and column numbers. +func (p Position) String() string { + return fmt.Sprintf("(%d, %d)", p.Line, p.Col) +} + +// Invalid returns whether the position is invalid, i.e. whether Line or +// Col is zero or negative. +func (p Position) Invalid() bool { + return p.Line <= 0 || p.Col <= 0 +} diff --git a/vendor/github.com/pelletier/go-toml/position_test.go b/vendor/github.com/pelletier/go-toml/position_test.go new file mode 100644 index 000000000..63ad1afc8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/position_test.go @@ -0,0 +1,29 @@ +// Testing support for go-toml + +package toml + +import ( + "testing" +) + +func TestPositionString(t *testing.T) { + p := Position{123, 456} + expected := "(123, 456)" + value := p.String() + + if value != expected { + t.Errorf("Expected %v, got %v instead", expected, value) + } +} + +func TestInvalid(t *testing.T) { + for i, v := range []Position{ + {0, 1234}, + {1234, 0}, + {0, 0}, + } { + if !v.Invalid() { + t.Errorf("Position at %v is valid: %v", i, v) + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/query/doc.go b/vendor/github.com/pelletier/go-toml/query/doc.go new file mode 100644 index 000000000..ed63c1109 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/doc.go @@ -0,0 +1,175 @@ +// Package query performs JSONPath-like queries on a TOML document. +// +// The query path implementation is based loosely on the JSONPath specification: +// http://goessner.net/articles/JsonPath/. +// +// The idea behind a query path is to allow quick access to any element, or set +// of elements within a TOML document, with a single expression.
+// +// result, err := query.CompileAndExecute("$.foo.bar.baz", tree) +// +// This is roughly equivalent to: +// +// next := tree.Get("foo") +// if next != nil { +// next = next.Get("bar") +// if next != nil { +// next = next.Get("baz") +// } +// } +// result := next +// +// err is non-nil if the query could not be parsed. +// +// If no node in the tree matches the query, result will simply contain an empty list of +// items. +// +// As illustrated above, the query path is much more concise, especially since +// the structure of the TOML file can vary. Rather than making assumptions about +// a document's structure, a query allows the programmer to make structured +// requests into the document, and get zero or more values as a result. +// +// Query syntax +// +// The syntax of a query begins with a root token, followed by any number of +// sub-expressions: +// +// $ +// Root of the TOML tree. This must always come first. +// .name +// Selects child of this node, where 'name' is a TOML key +// name. +// ['name'] +// Selects child of this node, where 'name' is a string +// containing a TOML key name. +// [index] +// Selects child array element at 'index'. +// ..expr +// Recursively selects all children, filtered by a union, +// index, or slice expression. +// ..* +// Recursive selection of all nodes at this point in the +// tree. +// .* +// Selects all children of the current node. +// [expr,expr] +// Union operator - a logical 'or' grouping of two or more +// sub-expressions: index, key name, or filter. +// [start:end:step] +// Slice operator - selects array elements from start to +// end-1, at the given step. All three arguments are +// optional. +// [?(filter)] +// Named filter expression - the function 'filter' is +// used to filter children at this node. +// +// Query Indexes And Slices +// +// Index expressions perform no bounds checking, and will contribute no +// values to the result set if the provided index or index range is invalid. +// Negative indexes represent values from the end of the array, counting backwards. +// +// // select the last index of the array named 'foo' +// query.CompileAndExecute("$.foo[-1]", tree) +// +// Slice expressions are supported by using ':' to separate a start/end index pair. +// +// // select up to the first five elements in the array +// query.CompileAndExecute("$.foo[0:5]", tree) +// +// Slice expressions also allow negative indexes for the start and stop +// arguments. +// +// // select all array elements. +// query.CompileAndExecute("$.foo[0:-1]", tree) +// +// Slice expressions may have an optional stride/step parameter: +// +// // select every other element +// query.CompileAndExecute("$.foo[0:-1:2]", tree) +// +// Slice start and end parameters are also optional: +// +// // these are all equivalent and select all the values in the array +// query.CompileAndExecute("$.foo[:]", tree) +// query.CompileAndExecute("$.foo[0:]", tree) +// query.CompileAndExecute("$.foo[:-1]", tree) +// query.CompileAndExecute("$.foo[0:-1:]", tree) +// query.CompileAndExecute("$.foo[::1]", tree) +// query.CompileAndExecute("$.foo[0::1]", tree) +// query.CompileAndExecute("$.foo[:-1:1]", tree) +// query.CompileAndExecute("$.foo[0:-1:1]", tree) +// +// Query Filters +// +// Query filters are used within a Union [,] or single Filter [] expression. +// A filter only allows nodes that qualify through to the next expression, +// and/or into the result set. +// +// // returns children of foo that are permitted by the 'bar' filter.
+// query.CompileAndExecute("$.foo[?(bar)]", tree) +// +// There are several filters provided with the library: +// +// tree +// Allows nodes of type Tree. +// int +// Allows nodes of type int64. +// float +// Allows nodes of type float64. +// string +// Allows nodes of type string. +// time +// Allows nodes of type time.Time. +// bool +// Allows nodes of type bool. +// +// Query Results +// +// An executed query returns a Result object. This contains the nodes +// in the TOML tree that satisfy the query expression. Position information +// is also available for each value in the set. +// +// // display the results of a query +// results, _ := query.CompileAndExecute("$.foo.bar.baz", tree) +// for idx, value := range results.Values() { +// fmt.Printf("%v: %v\n", results.Positions()[idx], value) +// } +// +// Compiled Queries +// +// Queries may be executed directly on a Tree object, or compiled ahead +// of time and executed separately. The former is more convenient, but has the +// penalty of having to recompile the query expression each time. +// +// // basic query +// results, _ := query.CompileAndExecute("$.foo.bar.baz", tree) +// +// // compiled query +// q, err := query.Compile("$.foo.bar.baz") +// results := q.Execute(tree) +// +// // run the compiled query again on a different tree +// moreResults := q.Execute(anotherTree) +// +// User Defined Query Filters +// +// Filter expressions may also be user defined by using the SetFilter() +// function on the Query object. The function must return true/false, which +// signifies whether the passed node is kept or discarded, respectively. +// +// // create a query that references a user-defined filter +// q, _ := query.Compile("$[?(bazOnly)]") +// +// // define the filter, and assign it to the query +// q.SetFilter("bazOnly", func(node interface{}) bool { +// if tree, ok := node.(*Tree); ok { +// return tree.Has("baz") +// } +// return false // reject all other node types +// }) +// +// // run the query +// q.Execute(tree) +// +package query diff --git a/vendor/github.com/pelletier/go-toml/query/lexer.go b/vendor/github.com/pelletier/go-toml/query/lexer.go new file mode 100644 index 000000000..2dc319408 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/lexer.go @@ -0,0 +1,357 @@ +// TOML JSONPath lexer.
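+//
+// The lexer below is organized as a state machine: each lexing method consumes
+// input, emits tokens, and returns the next state function, and run() loops
+// until a state returns nil. A minimal sketch of the pattern (these names are
+// illustrative only, not part of the package):
+//
+//	type stateFn func() stateFn
+//
+//	func run(start stateFn) {
+//		for state := start; state != nil; {
+//			state = state()
+//		}
+//	}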
+// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" + "strconv" + "strings" + "unicode/utf8" +) + +// Lexer state function +type queryLexStateFn func() queryLexStateFn + +// Lexer definition +type queryLexer struct { + input string + start int + pos int + width int + tokens chan token + depth int + line int + col int + stringTerm string +} + +func (l *queryLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } + close(l.tokens) +} + +func (l *queryLexer) nextStart() { + // iterate by runes (utf8 characters) + // search for newlines and advance line/col counts + for i := l.start; i < l.pos; { + r, width := utf8.DecodeRuneInString(l.input[i:]) + if r == '\n' { + l.line++ + l.col = 1 + } else { + l.col++ + } + i += width + } + // advance start position to next token + l.start = l.pos +} + +func (l *queryLexer) emit(t tokenType) { + l.tokens <- token{ + Position: toml.Position{Line: l.line, Col: l.col}, + typ: t, + val: l.input[l.start:l.pos], + } + l.nextStart() +} + +func (l *queryLexer) emitWithValue(t tokenType, value string) { + l.tokens <- token{ + Position: toml.Position{Line: l.line, Col: l.col}, + typ: t, + val: value, + } + l.nextStart() +} + +func (l *queryLexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + var r rune + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *queryLexer) ignore() { + l.nextStart() +} + +func (l *queryLexer) backup() { + l.pos -= l.width +} + +func (l *queryLexer) errorf(format string, args ...interface{}) queryLexStateFn { + l.tokens <- token{ + Position: toml.Position{Line: l.line, Col: l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + } + return nil +} + +func (l *queryLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func (l *queryLexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +func (l *queryLexer) follow(next string) bool { + return strings.HasPrefix(l.input[l.pos:], next) +} + +func (l *queryLexer) lexVoid() queryLexStateFn { + for { + next := l.peek() + switch next { + case '$': + l.pos++ + l.emit(tokenDollar) + continue + case '.': + if l.follow("..") { + l.pos += 2 + l.emit(tokenDotDot) + } else { + l.pos++ + l.emit(tokenDot) + } + continue + case '[': + l.pos++ + l.emit(tokenLeftBracket) + continue + case ']': + l.pos++ + l.emit(tokenRightBracket) + continue + case ',': + l.pos++ + l.emit(tokenComma) + continue + case '*': + l.pos++ + l.emit(tokenStar) + continue + case '(': + l.pos++ + l.emit(tokenLeftParen) + continue + case ')': + l.pos++ + l.emit(tokenRightParen) + continue + case '?': + l.pos++ + l.emit(tokenQuestion) + continue + case ':': + l.pos++ + l.emit(tokenColon) + continue + case '\'': + l.ignore() + l.stringTerm = string(next) + return l.lexString + case '"': + l.ignore() + l.stringTerm = string(next) + return l.lexString + } + + if isSpace(next) { + l.next() + l.ignore() + continue + } + + if isAlphanumeric(next) { + return l.lexKey + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + if l.next() == eof { + break + } + + return l.errorf("unexpected char: '%v'", next) + } + l.emit(tokenEOF) + return nil +} + +func (l *queryLexer) lexKey() queryLexStateFn { + for { + next := l.peek() + if !isAlphanumeric(next) { + l.emit(tokenKey) + return l.lexVoid + } + + if 
l.next() == eof { + break + } + } + l.emit(tokenEOF) + return nil +} + +func (l *queryLexer) lexString() queryLexStateFn { + l.pos++ + l.ignore() + growingString := "" + + for { + if l.follow(l.stringTerm) { + l.emitWithValue(tokenString, growingString) + l.pos++ + l.ignore() + return l.lexVoid + } + + if l.follow("\\\"") { + l.pos++ + growingString += "\"" + } else if l.follow("\\'") { + l.pos++ + growingString += "'" + } else if l.follow("\\n") { + l.pos++ + growingString += "\n" + } else if l.follow("\\b") { + l.pos++ + growingString += "\b" + } else if l.follow("\\f") { + l.pos++ + growingString += "\f" + } else if l.follow("\\/") { + l.pos++ + growingString += "/" + } else if l.follow("\\t") { + l.pos++ + growingString += "\t" + } else if l.follow("\\r") { + l.pos++ + growingString += "\r" + } else if l.follow("\\\\") { + l.pos++ + growingString += "\\" + } else if l.follow("\\u") { + l.pos += 2 + code := "" + for i := 0; i < 4; i++ { + c := l.peek() + l.pos++ + if !isHexDigit(c) { + return l.errorf("unfinished unicode escape") + } + code = code + string(c) + } + l.pos-- + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return l.errorf("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + } else if l.follow("\\U") { + l.pos += 2 + code := "" + for i := 0; i < 8; i++ { + c := l.peek() + l.pos++ + if !isHexDigit(c) { + return l.errorf("unfinished unicode escape") + } + code = code + string(c) + } + l.pos-- + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return l.errorf("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + } else if l.follow("\\") { + l.pos++ + return l.errorf("invalid escape sequence: \\" + string(l.peek())) + } else { + growingString += string(l.peek()) + } + + if l.next() == eof { + break + } + } + + return l.errorf("unclosed string") +} + +func (l *queryLexer) lexNumber() queryLexStateFn { + l.ignore() + if !l.accept("+") { + l.accept("-") + } + pointSeen := false + digitSeen := false + for { + next := l.next() + if next == '.' 
{ + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if isDigit(next) { + digitSeen = true + } else { + l.backup() + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexVoid +} + +// Entry point +func lexQuery(input string) chan token { + l := &queryLexer{ + input: input, + tokens: make(chan token), + line: 1, + col: 1, + } + go l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/query/lexer_test.go b/vendor/github.com/pelletier/go-toml/query/lexer_test.go new file mode 100644 index 000000000..8ce0501fe --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/lexer_test.go @@ -0,0 +1,179 @@ +package query + +import ( + "github.com/pelletier/go-toml" + "testing" +) + +func testQLFlow(t *testing.T, input string, expectedFlow []token) { + ch := lexQuery(input) + for idx, expected := range expectedFlow { + token := <-ch + if token != expected { + t.Log("While testing #", idx, ":", input) + t.Log("compared (got)", token, "to (expected)", expected) + t.Log("\tvalue:", token.val, "<->", expected.val) + t.Log("\tvalue as bytes:", []byte(token.val), "<->", []byte(expected.val)) + t.Log("\ttype:", token.typ.String(), "<->", expected.typ.String()) + t.Log("\tline:", token.Line, "<->", expected.Line) + t.Log("\tcolumn:", token.Col, "<->", expected.Col) + t.Log("compared", token, "to", expected) + t.FailNow() + } + } + + tok, ok := <-ch + if ok { + t.Log("channel is not closed!") + t.Log(len(ch)+1, "tokens remaining:") + + t.Log("token ->", tok) + for token := range ch { + t.Log("token ->", token) + } + t.FailNow() + } +} + +func TestLexSpecialChars(t *testing.T) { + testQLFlow(t, " .$[]..()?*", []token{ + {toml.Position{1, 2}, tokenDot, "."}, + {toml.Position{1, 3}, tokenDollar, "$"}, + {toml.Position{1, 4}, tokenLeftBracket, "["}, + {toml.Position{1, 5}, tokenRightBracket, "]"}, + {toml.Position{1, 6}, tokenDotDot, ".."}, + {toml.Position{1, 8}, tokenLeftParen, "("}, + {toml.Position{1, 9}, tokenRightParen, ")"}, + {toml.Position{1, 10}, tokenQuestion, "?"}, + {toml.Position{1, 11}, tokenStar, "*"}, + {toml.Position{1, 12}, tokenEOF, ""}, + }) +} + +func TestLexString(t *testing.T) { + testQLFlow(t, "'foo\n'", []token{ + {toml.Position{1, 2}, tokenString, "foo\n"}, + {toml.Position{2, 2}, tokenEOF, ""}, + }) +} + +func TestLexDoubleString(t *testing.T) { + testQLFlow(t, `"bar"`, []token{ + {toml.Position{1, 2}, tokenString, "bar"}, + {toml.Position{1, 6}, tokenEOF, ""}, + }) +} + +func TestLexStringEscapes(t *testing.T) { + testQLFlow(t, `"foo \" \' \b \f \/ \t \r \\ \u03A9 \U00012345 \n bar"`, []token{ + {toml.Position{1, 2}, tokenString, "foo \" ' \b \f / \t \r \\ \u03A9 \U00012345 \n bar"}, + {toml.Position{1, 55}, tokenEOF, ""}, + }) +} + +func TestLexStringUnfinishedUnicode4(t *testing.T) { + testQLFlow(t, `"\u000"`, []token{ + {toml.Position{1, 2}, tokenError, "unfinished unicode escape"}, + }) +} + +func TestLexStringUnfinishedUnicode8(t *testing.T) { + testQLFlow(t, `"\U0000"`, []token{ + {toml.Position{1, 2}, tokenError, "unfinished unicode escape"}, + }) +} + +func TestLexStringInvalidEscape(t *testing.T) { + testQLFlow(t, `"\x"`, []token{ + {toml.Position{1, 2}, tokenError, "invalid escape sequence: \\x"}, + }) +} + +func 
TestLexStringUnfinished(t *testing.T) { + testQLFlow(t, `"bar`, []token{ + {toml.Position{1, 2}, tokenError, "unclosed string"}, + }) +} + +func TestLexKey(t *testing.T) { + testQLFlow(t, "foo", []token{ + {toml.Position{1, 1}, tokenKey, "foo"}, + {toml.Position{1, 4}, tokenEOF, ""}, + }) +} + +func TestLexRecurse(t *testing.T) { + testQLFlow(t, "$..*", []token{ + {toml.Position{1, 1}, tokenDollar, "$"}, + {toml.Position{1, 2}, tokenDotDot, ".."}, + {toml.Position{1, 4}, tokenStar, "*"}, + {toml.Position{1, 5}, tokenEOF, ""}, + }) +} + +func TestLexBracketKey(t *testing.T) { + testQLFlow(t, "$[foo]", []token{ + {toml.Position{1, 1}, tokenDollar, "$"}, + {toml.Position{1, 2}, tokenLeftBracket, "["}, + {toml.Position{1, 3}, tokenKey, "foo"}, + {toml.Position{1, 6}, tokenRightBracket, "]"}, + {toml.Position{1, 7}, tokenEOF, ""}, + }) +} + +func TestLexSpace(t *testing.T) { + testQLFlow(t, "foo bar baz", []token{ + {toml.Position{1, 1}, tokenKey, "foo"}, + {toml.Position{1, 5}, tokenKey, "bar"}, + {toml.Position{1, 9}, tokenKey, "baz"}, + {toml.Position{1, 12}, tokenEOF, ""}, + }) +} + +func TestLexInteger(t *testing.T) { + testQLFlow(t, "100 +200 -300", []token{ + {toml.Position{1, 1}, tokenInteger, "100"}, + {toml.Position{1, 5}, tokenInteger, "+200"}, + {toml.Position{1, 10}, tokenInteger, "-300"}, + {toml.Position{1, 14}, tokenEOF, ""}, + }) +} + +func TestLexFloat(t *testing.T) { + testQLFlow(t, "100.0 +200.0 -300.0", []token{ + {toml.Position{1, 1}, tokenFloat, "100.0"}, + {toml.Position{1, 7}, tokenFloat, "+200.0"}, + {toml.Position{1, 14}, tokenFloat, "-300.0"}, + {toml.Position{1, 20}, tokenEOF, ""}, + }) +} + +func TestLexFloatWithMultipleDots(t *testing.T) { + testQLFlow(t, "4.2.", []token{ + {toml.Position{1, 1}, tokenError, "cannot have two dots in one float"}, + }) +} + +func TestLexFloatLeadingDot(t *testing.T) { + testQLFlow(t, "+.1", []token{ + {toml.Position{1, 1}, tokenError, "cannot start float with a dot"}, + }) +} + +func TestLexFloatWithTrailingDot(t *testing.T) { + testQLFlow(t, "42.", []token{ + {toml.Position{1, 1}, tokenError, "float cannot end with a dot"}, + }) +} + +func TestLexNumberWithoutDigit(t *testing.T) { + testQLFlow(t, "+", []token{ + {toml.Position{1, 1}, tokenError, "no digit in that number"}, + }) +} + +func TestLexUnknown(t *testing.T) { + testQLFlow(t, "^", []token{ + {toml.Position{1, 1}, tokenError, "unexpected char: '94'"}, + }) +} diff --git a/vendor/github.com/pelletier/go-toml/query/match.go b/vendor/github.com/pelletier/go-toml/query/match.go new file mode 100644 index 000000000..d7bb15a45 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/match.go @@ -0,0 +1,232 @@ +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" +) + +// base match +type matchBase struct { + next pathFn +} + +func (f *matchBase) setNext(next pathFn) { + f.next = next +} + +// terminating functor - gathers results +type terminatingFn struct { + // empty +} + +func newTerminatingFn() *terminatingFn { + return &terminatingFn{} +} + +func (f *terminatingFn) setNext(next pathFn) { + // do nothing +} + +func (f *terminatingFn) call(node interface{}, ctx *queryContext) { + ctx.result.appendResult(node, ctx.lastPosition) +} + +// match single key +type matchKeyFn struct { + matchBase + Name string +} + +func newMatchKeyFn(name string) *matchKeyFn { + return &matchKeyFn{Name: name} +} + +func (f *matchKeyFn) call(node interface{}, ctx *queryContext) { + if array, ok := node.([]*toml.Tree); ok { + for _, tree := range array { + item := tree.Get(f.Name) + 
if item != nil { + ctx.lastPosition = tree.GetPosition(f.Name) + f.next.call(item, ctx) + } + } + } else if tree, ok := node.(*toml.Tree); ok { + item := tree.Get(f.Name) + if item != nil { + ctx.lastPosition = tree.GetPosition(f.Name) + f.next.call(item, ctx) + } + } +} + +// match single index +type matchIndexFn struct { + matchBase + Idx int +} + +func newMatchIndexFn(idx int) *matchIndexFn { + return &matchIndexFn{Idx: idx} +} + +func (f *matchIndexFn) call(node interface{}, ctx *queryContext) { + if arr, ok := node.([]interface{}); ok { + if f.Idx < len(arr) && f.Idx >= 0 { + if treesArray, ok := node.([]*toml.Tree); ok { + if len(treesArray) > 0 { + ctx.lastPosition = treesArray[0].Position() + } + } + f.next.call(arr[f.Idx], ctx) + } + } +} + +// filter by slicing +type matchSliceFn struct { + matchBase + Start, End, Step int +} + +func newMatchSliceFn(start, end, step int) *matchSliceFn { + return &matchSliceFn{Start: start, End: end, Step: step} +} + +func (f *matchSliceFn) call(node interface{}, ctx *queryContext) { + if arr, ok := node.([]interface{}); ok { + // adjust indexes for negative values, reverse ordering + realStart, realEnd := f.Start, f.End + if realStart < 0 { + realStart = len(arr) + realStart + } + if realEnd < 0 { + realEnd = len(arr) + realEnd + } + if realEnd < realStart { + realEnd, realStart = realStart, realEnd // swap + } + // loop and gather + for idx := realStart; idx < realEnd; idx += f.Step { + if treesArray, ok := node.([]*toml.Tree); ok { + if len(treesArray) > 0 { + ctx.lastPosition = treesArray[0].Position() + } + } + f.next.call(arr[idx], ctx) + } + } +} + +// match anything +type matchAnyFn struct { + matchBase +} + +func newMatchAnyFn() *matchAnyFn { + return &matchAnyFn{} +} + +func (f *matchAnyFn) call(node interface{}, ctx *queryContext) { + if tree, ok := node.(*toml.Tree); ok { + for _, k := range tree.Keys() { + v := tree.Get(k) + ctx.lastPosition = tree.GetPosition(k) + f.next.call(v, ctx) + } + } +} + +// filter through union +type matchUnionFn struct { + Union []pathFn +} + +func (f *matchUnionFn) setNext(next pathFn) { + for _, fn := range f.Union { + fn.setNext(next) + } +} + +func (f *matchUnionFn) call(node interface{}, ctx *queryContext) { + for _, fn := range f.Union { + fn.call(node, ctx) + } +} + +// match every single last node in the tree +type matchRecursiveFn struct { + matchBase +} + +func newMatchRecursiveFn() *matchRecursiveFn { + return &matchRecursiveFn{} +} + +func (f *matchRecursiveFn) call(node interface{}, ctx *queryContext) { + originalPosition := ctx.lastPosition + if tree, ok := node.(*toml.Tree); ok { + var visit func(tree *toml.Tree) + visit = func(tree *toml.Tree) { + for _, k := range tree.Keys() { + v := tree.Get(k) + ctx.lastPosition = tree.GetPosition(k) + f.next.call(v, ctx) + switch node := v.(type) { + case *toml.Tree: + visit(node) + case []*toml.Tree: + for _, subtree := range node { + visit(subtree) + } + } + } + } + ctx.lastPosition = originalPosition + f.next.call(tree, ctx) + visit(tree) + } +} + +// match based on an externally provided functional filter +type matchFilterFn struct { + matchBase + Pos toml.Position + Name string +} + +func newMatchFilterFn(name string, pos toml.Position) *matchFilterFn { + return &matchFilterFn{Name: name, Pos: pos} +} + +func (f *matchFilterFn) call(node interface{}, ctx *queryContext) { + fn, ok := (*ctx.filters)[f.Name] + if !ok { + panic(fmt.Sprintf("%s: query context does not have filter '%s'", + f.Pos.String(), f.Name)) + } + switch castNode := node.(type) { 
+ case *toml.Tree: + for _, k := range castNode.Keys() { + v := castNode.Get(k) + if fn(v) { + ctx.lastPosition = castNode.GetPosition(k) + f.next.call(v, ctx) + } + } + case []*toml.Tree: + for _, v := range castNode { + if fn(v) { + if len(castNode) > 0 { + ctx.lastPosition = castNode[0].Position() + } + f.next.call(v, ctx) + } + } + case []interface{}: + for _, v := range castNode { + if fn(v) { + f.next.call(v, ctx) + } + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/query/match_test.go b/vendor/github.com/pelletier/go-toml/query/match_test.go new file mode 100644 index 000000000..429b8f6b9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/match_test.go @@ -0,0 +1,202 @@ +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" + "testing" +) + +// dump path tree to a string +func pathString(root pathFn) string { + result := fmt.Sprintf("%T:", root) + switch fn := root.(type) { + case *terminatingFn: + result += "{}" + case *matchKeyFn: + result += fmt.Sprintf("{%s}", fn.Name) + result += pathString(fn.next) + case *matchIndexFn: + result += fmt.Sprintf("{%d}", fn.Idx) + result += pathString(fn.next) + case *matchSliceFn: + result += fmt.Sprintf("{%d:%d:%d}", + fn.Start, fn.End, fn.Step) + result += pathString(fn.next) + case *matchAnyFn: + result += "{}" + result += pathString(fn.next) + case *matchUnionFn: + result += "{[" + for _, v := range fn.Union { + result += pathString(v) + ", " + } + result += "]}" + case *matchRecursiveFn: + result += "{}" + result += pathString(fn.next) + case *matchFilterFn: + result += fmt.Sprintf("{%s}", fn.Name) + result += pathString(fn.next) + } + return result +} + +func assertPathMatch(t *testing.T, path, ref *Query) bool { + pathStr := pathString(path.root) + refStr := pathString(ref.root) + if pathStr != refStr { + t.Errorf("paths do not match") + t.Log("test:", pathStr) + t.Log("ref: ", refStr) + return false + } + return true +} + +func assertPath(t *testing.T, query string, ref *Query) { + path, _ := parseQuery(lexQuery(query)) + assertPathMatch(t, path, ref) +} + +func buildPath(parts ...pathFn) *Query { + query := newQuery() + for _, v := range parts { + query.appendPath(v) + } + return query +} + +func TestPathRoot(t *testing.T) { + assertPath(t, + "$", + buildPath( + // empty + )) +} + +func TestPathKey(t *testing.T) { + assertPath(t, + "$.foo", + buildPath( + newMatchKeyFn("foo"), + )) +} + +func TestPathBracketKey(t *testing.T) { + assertPath(t, + "$[foo]", + buildPath( + newMatchKeyFn("foo"), + )) +} + +func TestPathBracketStringKey(t *testing.T) { + assertPath(t, + "$['foo']", + buildPath( + newMatchKeyFn("foo"), + )) +} + +func TestPathIndex(t *testing.T) { + assertPath(t, + "$[123]", + buildPath( + newMatchIndexFn(123), + )) +} + +func TestPathSliceStart(t *testing.T) { + assertPath(t, + "$[123:]", + buildPath( + newMatchSliceFn(123, maxInt, 1), + )) +} + +func TestPathSliceStartEnd(t *testing.T) { + assertPath(t, + "$[123:456]", + buildPath( + newMatchSliceFn(123, 456, 1), + )) +} + +func TestPathSliceStartEndColon(t *testing.T) { + assertPath(t, + "$[123:456:]", + buildPath( + newMatchSliceFn(123, 456, 1), + )) +} + +func TestPathSliceStartStep(t *testing.T) { + assertPath(t, + "$[123::7]", + buildPath( + newMatchSliceFn(123, maxInt, 7), + )) +} + +func TestPathSliceEndStep(t *testing.T) { + assertPath(t, + "$[:456:7]", + buildPath( + newMatchSliceFn(0, 456, 7), + )) +} + +func TestPathSliceStep(t *testing.T) { + assertPath(t, + "$[::7]", + buildPath( + newMatchSliceFn(0, maxInt, 7), + )) +} + +func 
TestPathSliceAll(t *testing.T) { + assertPath(t, + "$[123:456:7]", + buildPath( + newMatchSliceFn(123, 456, 7), + )) +} + +func TestPathAny(t *testing.T) { + assertPath(t, + "$.*", + buildPath( + newMatchAnyFn(), + )) +} + +func TestPathUnion(t *testing.T) { + assertPath(t, + "$[foo, bar, baz]", + buildPath( + &matchUnionFn{[]pathFn{ + newMatchKeyFn("foo"), + newMatchKeyFn("bar"), + newMatchKeyFn("baz"), + }}, + )) +} + +func TestPathRecurse(t *testing.T) { + assertPath(t, + "$..*", + buildPath( + newMatchRecursiveFn(), + )) +} + +func TestPathFilterExpr(t *testing.T) { + assertPath(t, + "$[?('foo'),?(bar)]", + buildPath( + &matchUnionFn{[]pathFn{ + newMatchFilterFn("foo", toml.Position{}), + newMatchFilterFn("bar", toml.Position{}), + }}, + )) +} diff --git a/vendor/github.com/pelletier/go-toml/query/parser.go b/vendor/github.com/pelletier/go-toml/query/parser.go new file mode 100644 index 000000000..5f69b70d4 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/parser.go @@ -0,0 +1,275 @@ +/* + Based on the "jsonpath" spec/concept. + + http://goessner.net/articles/JsonPath/ + https://code.google.com/p/json-path/ +*/ + +package query + +import ( + "fmt" +) + +const maxInt = int(^uint(0) >> 1) + +type queryParser struct { + flow chan token + tokensBuffer []token + query *Query + union []pathFn + err error +} + +type queryParserStateFn func() queryParserStateFn + +// Formats and panics an error message based on a token +func (p *queryParser) parseError(tok *token, msg string, args ...interface{}) queryParserStateFn { + p.err = fmt.Errorf(tok.Position.String()+": "+msg, args...) + return nil // trigger parse to end +} + +func (p *queryParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *queryParser) backup(tok *token) { + p.tokensBuffer = append(p.tokensBuffer, *tok) +} + +func (p *queryParser) peek() *token { + if len(p.tokensBuffer) != 0 { + return &(p.tokensBuffer[0]) + } + + tok, ok := <-p.flow + if !ok { + return nil + } + p.backup(&tok) + return &tok +} + +func (p *queryParser) lookahead(types ...tokenType) bool { + result := true + buffer := []token{} + + for _, typ := range types { + tok := p.getToken() + if tok == nil { + result = false + break + } + buffer = append(buffer, *tok) + if tok.typ != typ { + result = false + break + } + } + // add the tokens back to the buffer, and return + p.tokensBuffer = append(p.tokensBuffer, buffer...) + return result +} + +func (p *queryParser) getToken() *token { + if len(p.tokensBuffer) != 0 { + tok := p.tokensBuffer[0] + p.tokensBuffer = p.tokensBuffer[1:] + return &tok + } + tok, ok := <-p.flow + if !ok { + return nil + } + return &tok +} + +func (p *queryParser) parseStart() queryParserStateFn { + tok := p.getToken() + + if tok == nil || tok.typ == tokenEOF { + return nil + } + + if tok.typ != tokenDollar { + return p.parseError(tok, "Expected '$' at start of expression") + } + + return p.parseMatchExpr +} + +// handle '.' prefix, '[]', and '..' +func (p *queryParser) parseMatchExpr() queryParserStateFn { + tok := p.getToken() + switch tok.typ { + case tokenDotDot: + p.query.appendPath(&matchRecursiveFn{}) + // nested parse for '..' + tok := p.getToken() + switch tok.typ { + case tokenKey: + p.query.appendPath(newMatchKeyFn(tok.val)) + return p.parseMatchExpr + case tokenLeftBracket: + return p.parseBracketExpr + case tokenStar: + // do nothing - the recursive predicate is enough + return p.parseMatchExpr + } + + case tokenDot: + // nested parse for '.' 
+ tok := p.getToken() + switch tok.typ { + case tokenKey: + p.query.appendPath(newMatchKeyFn(tok.val)) + return p.parseMatchExpr + case tokenStar: + p.query.appendPath(&matchAnyFn{}) + return p.parseMatchExpr + } + + case tokenLeftBracket: + return p.parseBracketExpr + + case tokenEOF: + return nil // allow EOF at this stage + } + return p.parseError(tok, "expected match expression") +} + +func (p *queryParser) parseBracketExpr() queryParserStateFn { + if p.lookahead(tokenInteger, tokenColon) { + return p.parseSliceExpr + } + if p.peek().typ == tokenColon { + return p.parseSliceExpr + } + return p.parseUnionExpr +} + +func (p *queryParser) parseUnionExpr() queryParserStateFn { + var tok *token + + // this state can be traversed after some sub-expressions + // so be careful when setting up state in the parser + if p.union == nil { + p.union = []pathFn{} + } + +loop: // labeled loop for easy breaking + for { + if len(p.union) > 0 { + // parse delimiter or terminator + tok = p.getToken() + switch tok.typ { + case tokenComma: + // do nothing + case tokenRightBracket: + break loop + default: + return p.parseError(tok, "expected ',' or ']', not '%s'", tok.val) + } + } + + // parse sub expression + tok = p.getToken() + switch tok.typ { + case tokenInteger: + p.union = append(p.union, newMatchIndexFn(tok.Int())) + case tokenKey: + p.union = append(p.union, newMatchKeyFn(tok.val)) + case tokenString: + p.union = append(p.union, newMatchKeyFn(tok.val)) + case tokenQuestion: + return p.parseFilterExpr + default: + return p.parseError(tok, "expected union sub expression, not '%s', %d", tok.val, len(p.union)) + } + } + + // if there is only one sub-expression, use that instead + if len(p.union) == 1 { + p.query.appendPath(p.union[0]) + } else { + p.query.appendPath(&matchUnionFn{p.union}) + } + + p.union = nil // clear out state + return p.parseMatchExpr +} + +func (p *queryParser) parseSliceExpr() queryParserStateFn { + // init slice to grab all elements + start, end, step := 0, maxInt, 1 + + // parse optional start + tok := p.getToken() + if tok.typ == tokenInteger { + start = tok.Int() + tok = p.getToken() + } + if tok.typ != tokenColon { + return p.parseError(tok, "expected ':'") + } + + // parse optional end + tok = p.getToken() + if tok.typ == tokenInteger { + end = tok.Int() + tok = p.getToken() + } + if tok.typ == tokenRightBracket { + p.query.appendPath(newMatchSliceFn(start, end, step)) + return p.parseMatchExpr + } + if tok.typ != tokenColon { + return p.parseError(tok, "expected ']' or ':'") + } + + // parse optional step + tok = p.getToken() + if tok.typ == tokenInteger { + step = tok.Int() + if step < 0 { + return p.parseError(tok, "step must be a positive value") + } + tok = p.getToken() + } + if tok.typ != tokenRightBracket { + return p.parseError(tok, "expected ']'") + } + + p.query.appendPath(newMatchSliceFn(start, end, step)) + return p.parseMatchExpr +} + +func (p *queryParser) parseFilterExpr() queryParserStateFn { + tok := p.getToken() + if tok.typ != tokenLeftParen { + return p.parseError(tok, "expected left-parenthesis for filter expression") + } + tok = p.getToken() + if tok.typ != tokenKey && tok.typ != tokenString { + return p.parseError(tok, "expected key or string for filter function name") + } + name := tok.val + tok = p.getToken() + if tok.typ != tokenRightParen { + return p.parseError(tok, "expected right-parenthesis for filter expression") + } + p.union = append(p.union, newMatchFilterFn(name, tok.Position)) + return p.parseUnionExpr +} + +func parseQuery(flow chan 
token) (*Query, error) { + parser := &queryParser{ + flow: flow, + tokensBuffer: []token{}, + query: newQuery(), + } + parser.run() + return parser.query, parser.err +} diff --git a/vendor/github.com/pelletier/go-toml/query/parser_test.go b/vendor/github.com/pelletier/go-toml/query/parser_test.go new file mode 100644 index 000000000..473896a02 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/parser_test.go @@ -0,0 +1,482 @@ +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" + "io/ioutil" + "sort" + "strings" + "testing" + "time" +) + +type queryTestNode struct { + value interface{} + position toml.Position +} + +func valueString(root interface{}) string { + result := "" //fmt.Sprintf("%T:", root) + switch node := root.(type) { + case *Result: + items := []string{} + for i, v := range node.Values() { + items = append(items, fmt.Sprintf("%s:%s", + node.Positions()[i].String(), valueString(v))) + } + sort.Strings(items) + result = "[" + strings.Join(items, ", ") + "]" + case queryTestNode: + result = fmt.Sprintf("%s:%s", + node.position.String(), valueString(node.value)) + case []interface{}: + items := []string{} + for _, v := range node { + items = append(items, valueString(v)) + } + sort.Strings(items) + result = "[" + strings.Join(items, ", ") + "]" + case *toml.Tree: + // workaround for unreliable map key ordering + items := []string{} + for _, k := range node.Keys() { + v := node.GetPath([]string{k}) + items = append(items, k+":"+valueString(v)) + } + sort.Strings(items) + result = "{" + strings.Join(items, ", ") + "}" + case map[string]interface{}: + // workaround for unreliable map key ordering + items := []string{} + for k, v := range node { + items = append(items, k+":"+valueString(v)) + } + sort.Strings(items) + result = "{" + strings.Join(items, ", ") + "}" + case int64: + result += fmt.Sprintf("%d", node) + case string: + result += "'" + node + "'" + case float64: + result += fmt.Sprintf("%f", node) + case bool: + result += fmt.Sprintf("%t", node) + case time.Time: + result += fmt.Sprintf("'%v'", node) + } + return result +} + +func assertValue(t *testing.T, result, ref interface{}) { + pathStr := valueString(result) + refStr := valueString(ref) + if pathStr != refStr { + t.Errorf("values do not match") + t.Log("test:", pathStr) + t.Log("ref: ", refStr) + } +} + +func assertQueryPositions(t *testing.T, tomlDoc string, query string, ref []interface{}) { + tree, err := toml.Load(tomlDoc) + if err != nil { + t.Errorf("Non-nil toml parse error: %v", err) + return + } + q, err := Compile(query) + if err != nil { + t.Error(err) + return + } + results := q.Execute(tree) + assertValue(t, results, ref) +} + +func TestQueryRoot(t *testing.T) { + assertQueryPositions(t, + "a = 42", + "$", + []interface{}{ + queryTestNode{ + map[string]interface{}{ + "a": int64(42), + }, toml.Position{1, 1}, + }, + }) +} + +func TestQueryKey(t *testing.T) { + assertQueryPositions(t, + "[foo]\na = 42", + "$.foo.a", + []interface{}{ + queryTestNode{ + int64(42), toml.Position{2, 1}, + }, + }) +} + +func TestQueryKeyString(t *testing.T) { + assertQueryPositions(t, + "[foo]\na = 42", + "$.foo['a']", + []interface{}{ + queryTestNode{ + int64(42), toml.Position{2, 1}, + }, + }) +} + +func TestQueryIndex(t *testing.T) { + assertQueryPositions(t, + "[foo]\na = [1,2,3,4,5,6,7,8,9,0]", + "$.foo.a[5]", + []interface{}{ + queryTestNode{ + int64(6), toml.Position{2, 1}, + }, + }) +} + +func TestQuerySliceRange(t *testing.T) { + assertQueryPositions(t, + "[foo]\na = [1,2,3,4,5,6,7,8,9,0]", + 
"$.foo.a[0:5]", + []interface{}{ + queryTestNode{ + int64(1), toml.Position{2, 1}, + }, + queryTestNode{ + int64(2), toml.Position{2, 1}, + }, + queryTestNode{ + int64(3), toml.Position{2, 1}, + }, + queryTestNode{ + int64(4), toml.Position{2, 1}, + }, + queryTestNode{ + int64(5), toml.Position{2, 1}, + }, + }) +} + +func TestQuerySliceStep(t *testing.T) { + assertQueryPositions(t, + "[foo]\na = [1,2,3,4,5,6,7,8,9,0]", + "$.foo.a[0:5:2]", + []interface{}{ + queryTestNode{ + int64(1), toml.Position{2, 1}, + }, + queryTestNode{ + int64(3), toml.Position{2, 1}, + }, + queryTestNode{ + int64(5), toml.Position{2, 1}, + }, + }) +} + +func TestQueryAny(t *testing.T) { + assertQueryPositions(t, + "[foo.bar]\na=1\nb=2\n[foo.baz]\na=3\nb=4", + "$.foo.*", + []interface{}{ + queryTestNode{ + map[string]interface{}{ + "a": int64(1), + "b": int64(2), + }, toml.Position{1, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(3), + "b": int64(4), + }, toml.Position{4, 1}, + }, + }) +} +func TestQueryUnionSimple(t *testing.T) { + assertQueryPositions(t, + "[foo.bar]\na=1\nb=2\n[baz.foo]\na=3\nb=4\n[gorf.foo]\na=5\nb=6", + "$.*[bar,foo]", + []interface{}{ + queryTestNode{ + map[string]interface{}{ + "a": int64(1), + "b": int64(2), + }, toml.Position{1, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(3), + "b": int64(4), + }, toml.Position{4, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(5), + "b": int64(6), + }, toml.Position{7, 1}, + }, + }) +} + +func TestQueryRecursionAll(t *testing.T) { + assertQueryPositions(t, + "[foo.bar]\na=1\nb=2\n[baz.foo]\na=3\nb=4\n[gorf.foo]\na=5\nb=6", + "$..*", + []interface{}{ + queryTestNode{ + map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": map[string]interface{}{ + "a": int64(1), + "b": int64(2), + }, + }, + "baz": map[string]interface{}{ + "foo": map[string]interface{}{ + "a": int64(3), + "b": int64(4), + }, + }, + "gorf": map[string]interface{}{ + "foo": map[string]interface{}{ + "a": int64(5), + "b": int64(6), + }, + }, + }, toml.Position{1, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "bar": map[string]interface{}{ + "a": int64(1), + "b": int64(2), + }, + }, toml.Position{1, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(1), + "b": int64(2), + }, toml.Position{1, 1}, + }, + queryTestNode{ + int64(1), toml.Position{2, 1}, + }, + queryTestNode{ + int64(2), toml.Position{3, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "foo": map[string]interface{}{ + "a": int64(3), + "b": int64(4), + }, + }, toml.Position{4, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(3), + "b": int64(4), + }, toml.Position{4, 1}, + }, + queryTestNode{ + int64(3), toml.Position{5, 1}, + }, + queryTestNode{ + int64(4), toml.Position{6, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "foo": map[string]interface{}{ + "a": int64(5), + "b": int64(6), + }, + }, toml.Position{7, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(5), + "b": int64(6), + }, toml.Position{7, 1}, + }, + queryTestNode{ + int64(5), toml.Position{8, 1}, + }, + queryTestNode{ + int64(6), toml.Position{9, 1}, + }, + }) +} + +func TestQueryRecursionUnionSimple(t *testing.T) { + assertQueryPositions(t, + "[foo.bar]\na=1\nb=2\n[baz.foo]\na=3\nb=4\n[gorf.foo]\na=5\nb=6", + "$..['foo','bar']", + []interface{}{ + queryTestNode{ + map[string]interface{}{ + "bar": map[string]interface{}{ + "a": int64(1), + "b": int64(2), + }, + }, toml.Position{1, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": 
int64(3), + "b": int64(4), + }, toml.Position{4, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(1), + "b": int64(2), + }, toml.Position{1, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "a": int64(5), + "b": int64(6), + }, toml.Position{7, 1}, + }, + }) +} + +func TestQueryFilterFn(t *testing.T) { + buff, err := ioutil.ReadFile("../example.toml") + if err != nil { + t.Error(err) + return + } + + assertQueryPositions(t, string(buff), + "$..[?(int)]", + []interface{}{ + queryTestNode{ + int64(8001), toml.Position{13, 1}, + }, + queryTestNode{ + int64(8001), toml.Position{13, 1}, + }, + queryTestNode{ + int64(8002), toml.Position{13, 1}, + }, + queryTestNode{ + int64(5000), toml.Position{14, 1}, + }, + }) + + assertQueryPositions(t, string(buff), + "$..[?(string)]", + []interface{}{ + queryTestNode{ + "TOML Example", toml.Position{3, 1}, + }, + queryTestNode{ + "Tom Preston-Werner", toml.Position{6, 1}, + }, + queryTestNode{ + "GitHub", toml.Position{7, 1}, + }, + queryTestNode{ + "GitHub Cofounder & CEO\nLikes tater tots and beer.", + toml.Position{8, 1}, + }, + queryTestNode{ + "192.168.1.1", toml.Position{12, 1}, + }, + queryTestNode{ + "10.0.0.1", toml.Position{21, 3}, + }, + queryTestNode{ + "eqdc10", toml.Position{22, 3}, + }, + queryTestNode{ + "10.0.0.2", toml.Position{25, 3}, + }, + queryTestNode{ + "eqdc10", toml.Position{26, 3}, + }, + }) + + assertQueryPositions(t, string(buff), + "$..[?(float)]", + []interface{}{ + // no float values in document + }) + + tv, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z") + assertQueryPositions(t, string(buff), + "$..[?(tree)]", + []interface{}{ + queryTestNode{ + map[string]interface{}{ + "name": "Tom Preston-Werner", + "organization": "GitHub", + "bio": "GitHub Cofounder & CEO\nLikes tater tots and beer.", + "dob": tv, + }, toml.Position{5, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "server": "192.168.1.1", + "ports": []interface{}{int64(8001), int64(8001), int64(8002)}, + "connection_max": int64(5000), + "enabled": true, + }, toml.Position{11, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "alpha": map[string]interface{}{ + "ip": "10.0.0.1", + "dc": "eqdc10", + }, + "beta": map[string]interface{}{ + "ip": "10.0.0.2", + "dc": "eqdc10", + }, + }, toml.Position{17, 1}, + }, + queryTestNode{ + map[string]interface{}{ + "ip": "10.0.0.1", + "dc": "eqdc10", + }, toml.Position{20, 3}, + }, + queryTestNode{ + map[string]interface{}{ + "ip": "10.0.0.2", + "dc": "eqdc10", + }, toml.Position{24, 3}, + }, + queryTestNode{ + map[string]interface{}{ + "data": []interface{}{ + []interface{}{"gamma", "delta"}, + []interface{}{int64(1), int64(2)}, + }, + }, toml.Position{28, 1}, + }, + }) + + assertQueryPositions(t, string(buff), + "$..[?(time)]", + []interface{}{ + queryTestNode{ + tv, toml.Position{9, 1}, + }, + }) + + assertQueryPositions(t, string(buff), + "$..[?(bool)]", + []interface{}{ + queryTestNode{ + true, toml.Position{15, 1}, + }, + }) +} diff --git a/vendor/github.com/pelletier/go-toml/query/query.go b/vendor/github.com/pelletier/go-toml/query/query.go new file mode 100644 index 000000000..1c6cd8014 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/query.go @@ -0,0 +1,158 @@ +package query + +import ( + "time" + + "github.com/pelletier/go-toml" +) + +// NodeFilterFn represents a user-defined filter function, for use with +// Query.SetFilter(). +// +// The return value of the function must indicate if 'node' is to be included +// at this stage of the TOML path. 
Returning true will include the node, and
+// returning false will exclude it.
+//
+// NOTE: Care should be taken to write filter callbacks such that they are safe
+// to use from multiple goroutines.
+type NodeFilterFn func(node interface{}) bool
+
+// Result is the result of executing a Query.
+type Result struct {
+	items     []interface{}
+	positions []toml.Position
+}
+
+// appends a value/position pair to the result set.
+func (r *Result) appendResult(node interface{}, pos toml.Position) {
+	r.items = append(r.items, node)
+	r.positions = append(r.positions, pos)
+}
+
+// Values is a set of values within a Result. The order of values is not
+// guaranteed to be in document order, and may be different each time a query is
+// executed.
+func (r Result) Values() []interface{} {
+	return r.items
+}
+
+// Positions is a set of positions for values within a Result. Each index
+// in Positions() corresponds to the entry in Values() of the same index.
+func (r Result) Positions() []toml.Position {
+	return r.positions
+}
+
+// runtime context for executing query paths
+type queryContext struct {
+	result       *Result
+	filters      *map[string]NodeFilterFn
+	lastPosition toml.Position
+}
+
+// generic path functor interface
+type pathFn interface {
+	setNext(next pathFn)
+	// it is the caller's responsibility to set the ctx.lastPosition before invoking call()
+	// node can be one of: *toml.Tree, []*toml.Tree, or a scalar
+	call(node interface{}, ctx *queryContext)
+}
+
+// A Query is the representation of a compiled TOML path. A Query is safe
+// for concurrent use by multiple goroutines.
+type Query struct {
+	root    pathFn
+	tail    pathFn
+	filters *map[string]NodeFilterFn
+}
+
+func newQuery() *Query {
+	return &Query{
+		root:    nil,
+		tail:    nil,
+		filters: &defaultFilterFunctions,
+	}
+}
+
+func (q *Query) appendPath(next pathFn) {
+	if q.root == nil {
+		q.root = next
+	} else {
+		q.tail.setNext(next)
+	}
+	q.tail = next
+	next.setNext(newTerminatingFn()) // init the next functor
+}
+
+// Compile compiles a TOML path expression. The returned Query can be used
+// to match elements within a Tree and its descendants. See Execute.
+func Compile(path string) (*Query, error) {
+	return parseQuery(lexQuery(path))
+}
+
+// Execute executes a query against a Tree, and returns the result of the query.
+func (q *Query) Execute(tree *toml.Tree) *Result {
+	result := &Result{
+		items:     []interface{}{},
+		positions: []toml.Position{},
+	}
+	if q.root == nil {
+		result.appendResult(tree, tree.GetPosition(""))
+	} else {
+		ctx := &queryContext{
+			result:  result,
+			filters: q.filters,
+		}
+		ctx.lastPosition = tree.Position()
+		q.root.call(tree, ctx)
+	}
+	return result
+}
+
+// CompileAndExecute is a shorthand for Compile(path) followed by Execute(tree).
+func CompileAndExecute(path string, tree *toml.Tree) (*Result, error) {
+	query, err := Compile(path)
+	if err != nil {
+		return nil, err
+	}
+	return query.Execute(tree), nil
+}
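For orientation, a minimal usage sketch of the query API above (the TOML content and variable names are invented for illustration, not taken from the vendored files): Compile builds the pathFn chain once, and the resulting Query can then be run against any Tree.

	package main

	import (
		"fmt"

		"github.com/pelletier/go-toml"
		"github.com/pelletier/go-toml/query"
	)

	func main() {
		tree, _ := toml.Load("[servers]\n[servers.alpha]\nip = \"10.0.0.1\"\n[servers.beta]\nip = \"10.0.0.2\"")
		q, _ := query.Compile("$.servers.*.ip") // compiled once; safe for concurrent reuse
		res := q.Execute(tree)
		for i, v := range res.Values() {
			// result order is not guaranteed, per the Values doc above
			fmt.Println(res.Positions()[i].String(), v)
		}
	}

+// SetFilter sets a user-defined filter function. These may be used inside
+// "?(..)" query expressions to filter TOML document elements within a query.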
+func (q *Query) SetFilter(name string, fn NodeFilterFn) { + if q.filters == &defaultFilterFunctions { + // clone the static table + q.filters = &map[string]NodeFilterFn{} + for k, v := range defaultFilterFunctions { + (*q.filters)[k] = v + } + } + (*q.filters)[name] = fn +} + +var defaultFilterFunctions = map[string]NodeFilterFn{ + "tree": func(node interface{}) bool { + _, ok := node.(*toml.Tree) + return ok + }, + "int": func(node interface{}) bool { + _, ok := node.(int64) + return ok + }, + "float": func(node interface{}) bool { + _, ok := node.(float64) + return ok + }, + "string": func(node interface{}) bool { + _, ok := node.(string) + return ok + }, + "time": func(node interface{}) bool { + _, ok := node.(time.Time) + return ok + }, + "bool": func(node interface{}) bool { + _, ok := node.(bool) + return ok + }, +} diff --git a/vendor/github.com/pelletier/go-toml/query/query_test.go b/vendor/github.com/pelletier/go-toml/query/query_test.go new file mode 100644 index 000000000..903a8dc73 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/query_test.go @@ -0,0 +1,157 @@ +package query + +import ( + "fmt" + "testing" + + "github.com/pelletier/go-toml" +) + +func assertArrayContainsInAnyOrder(t *testing.T, array []interface{}, objects ...interface{}) { + if len(array) != len(objects) { + t.Fatalf("array contains %d objects but %d are expected", len(array), len(objects)) + } + + for _, o := range objects { + found := false + for _, a := range array { + if a == o { + found = true + break + } + } + if !found { + t.Fatal(o, "not found in array", array) + } + } +} + +func TestQueryExample(t *testing.T) { + config, _ := toml.Load(` + [[book]] + title = "The Stand" + author = "Stephen King" + [[book]] + title = "For Whom the Bell Tolls" + author = "Ernest Hemmingway" + [[book]] + title = "Neuromancer" + author = "William Gibson" + `) + authors, err := CompileAndExecute("$.book.author", config) + if err != nil { + t.Fatal("unexpected error:", err) + } + names := authors.Values() + if len(names) != 3 { + t.Fatalf("query should return 3 names but returned %d", len(names)) + } + assertArrayContainsInAnyOrder(t, names, "Stephen King", "Ernest Hemmingway", "William Gibson") +} + +func TestQueryReadmeExample(t *testing.T) { + config, _ := toml.Load(` +[postgres] +user = "pelletier" +password = "mypassword" +`) + + query, err := Compile("$..[user,password]") + if err != nil { + t.Fatal("unexpected error:", err) + } + results := query.Execute(config) + values := results.Values() + if len(values) != 2 { + t.Fatalf("query should return 2 values but returned %d", len(values)) + } + assertArrayContainsInAnyOrder(t, values, "pelletier", "mypassword") +} + +func TestQueryPathNotPresent(t *testing.T) { + config, _ := toml.Load(`a = "hello"`) + query, err := Compile("$.foo.bar") + if err != nil { + t.Fatal("unexpected error:", err) + } + results := query.Execute(config) + if err != nil { + t.Fatalf("err should be nil. got %s instead", err) + } + if len(results.items) != 0 { + t.Fatalf("no items should be matched. 
%d matched instead", len(results.items)) + } +} + +func ExampleNodeFilterFn_filterExample() { + tree, _ := toml.Load(` + [struct_one] + foo = "foo" + bar = "bar" + + [struct_two] + baz = "baz" + gorf = "gorf" + `) + + // create a query that references a user-defined-filter + query, _ := Compile("$[?(bazOnly)]") + + // define the filter, and assign it to the query + query.SetFilter("bazOnly", func(node interface{}) bool { + if tree, ok := node.(*toml.Tree); ok { + return tree.Has("baz") + } + return false // reject all other node types + }) + + // results contain only the 'struct_two' Tree + query.Execute(tree) +} + +func ExampleQuery_queryExample() { + config, _ := toml.Load(` + [[book]] + title = "The Stand" + author = "Stephen King" + [[book]] + title = "For Whom the Bell Tolls" + author = "Ernest Hemmingway" + [[book]] + title = "Neuromancer" + author = "William Gibson" + `) + + // find and print all the authors in the document + query, _ := Compile("$.book.author") + authors := query.Execute(config) + for _, name := range authors.Values() { + fmt.Println(name) + } +} + +func TestTomlQuery(t *testing.T) { + tree, err := toml.Load("[foo.bar]\na=1\nb=2\n[baz.foo]\na=3\nb=4\n[gorf.foo]\na=5\nb=6") + if err != nil { + t.Error(err) + return + } + query, err := Compile("$.foo.bar") + if err != nil { + t.Error(err) + return + } + result := query.Execute(tree) + values := result.Values() + if len(values) != 1 { + t.Errorf("Expected resultset of 1, got %d instead: %v", len(values), values) + } + + if tt, ok := values[0].(*toml.Tree); !ok { + t.Errorf("Expected type of Tree: %T", values[0]) + } else if tt.Get("a") != int64(1) { + t.Errorf("Expected 'a' with a value 1: %v", tt.Get("a")) + } else if tt.Get("b") != int64(2) { + t.Errorf("Expected 'b' with a value 2: %v", tt.Get("b")) + } +} diff --git a/vendor/github.com/pelletier/go-toml/query/tokens.go b/vendor/github.com/pelletier/go-toml/query/tokens.go new file mode 100644 index 000000000..9ae579de2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/query/tokens.go @@ -0,0 +1,106 @@ +package query + +import ( + "fmt" + "github.com/pelletier/go-toml" + "strconv" + "unicode" +) + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenKey + tokenString + tokenInteger + tokenFloat + tokenLeftBracket + tokenRightBracket + tokenLeftParen + tokenRightParen + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Key", + "String", + "Integer", + "Float", + "[", + "]", + "(", + ")", + ",", + ":", + "$", + "*", + "?", + ".", + "..", +} + +type token struct { + toml.Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) Int() int { + if result, err := strconv.Atoi(t.val); err != nil { + panic(err) + } else { + return result + } +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isAlphanumeric(r rune) bool { + return unicode.IsLetter(r) || r == '_' +} + +func isDigit(r rune) bool { + return unicode.IsNumber(r) +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git 
a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh new file mode 100755 index 000000000..a70a8b022 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/test.sh @@ -0,0 +1,91 @@ +#!/bin/bash +# fail out of the script if anything here fails +set -e +set -o pipefail + +# set the path to the present working directory +export GOPATH=`pwd` + +function git_clone() { + path=$1 + branch=$2 + version=$3 + if [ ! -d "src/$path" ]; then + mkdir -p src/$path + git clone https://$path.git src/$path + fi + pushd src/$path + git checkout "$branch" + git reset --hard "$version" + popd +} + +# Remove potential previous runs +rm -rf src test_program_bin toml-test + +# Run go vet +go vet ./... + +go get github.com/pelletier/go-buffruneio +go get github.com/davecgh/go-spew/spew +go get gopkg.in/yaml.v2 +go get github.com/BurntSushi/toml + +# get code for BurntSushi TOML validation +# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize) +git_clone github.com/BurntSushi/toml master HEAD +git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD + +# build the BurntSushi test application +go build -o toml-test github.com/BurntSushi/toml-test + +# vendorize the current lib for testing +# NOTE: this basically mocks an install without having to go back out to github for code +mkdir -p src/github.com/pelletier/go-toml/cmd +mkdir -p src/github.com/pelletier/go-toml/query +cp *.go *.toml src/github.com/pelletier/go-toml +cp -R cmd/* src/github.com/pelletier/go-toml/cmd +cp -R query/* src/github.com/pelletier/go-toml/query +go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go + +# Run basic unit tests +go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out +go test github.com/pelletier/go-toml/cmd/tomljson +go test github.com/pelletier/go-toml/query + +# run the entire BurntSushi test suite +if [[ $# -eq 0 ]] ; then + echo "Running all BurntSushi tests" + ./toml-test ./test_program_bin | tee test_out +else + # run a specific test + test=$1 + test_path='src/github.com/BurntSushi/toml-test/tests' + valid_test="$test_path/valid/$test" + invalid_test="$test_path/invalid/$test" + + if [ -e "$valid_test.toml" ]; then + echo "Valid Test TOML for $test:" + echo "====" + cat "$valid_test.toml" + + echo "Valid Test JSON for $test:" + echo "====" + cat "$valid_test.json" + + echo "Go-TOML Output for $test:" + echo "====" + cat "$valid_test.toml" | ./test_program_bin + fi + + if [ -e "$invalid_test.toml" ]; then + echo "Invalid Test TOML for $test:" + echo "====" + cat "$invalid_test.toml" + + echo "Go-TOML Output for $test:" + echo "====" + echo "go-toml Output:" + cat "$invalid_test.toml" | ./test_program_bin + fi +fi diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go new file mode 100644 index 000000000..1a9081346 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -0,0 +1,144 @@ +package toml + +import ( + "fmt" + "strconv" + "unicode" +) + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenComment + tokenKey + tokenString + tokenInteger + tokenTrue + tokenFalse + tokenFloat + tokenInf + tokenNan + tokenEqual + tokenLeftBracket + tokenRightBracket + tokenLeftCurlyBrace + tokenRightCurlyBrace + tokenLeftParen + tokenRightParen + tokenDoubleLeftBracket + tokenDoubleRightBracket + tokenDate + tokenKeyGroup + tokenKeyGroupArray + 
tokenComma
+	tokenColon
+	tokenDollar
+	tokenStar
+	tokenQuestion
+	tokenDot
+	tokenDotDot
+	tokenEOL
+)
+
+var tokenTypeNames = []string{
+	"Error",
+	"EOF",
+	"Comment",
+	"Key",
+	"String",
+	"Integer",
+	"True",
+	"False",
+	"Float",
+	"Inf",
+	"NaN",
+	"=",
+	"[",
+	"]",
+	"{",
+	"}",
+	"(",
+	")",
+	"]]",
+	"[[",
+	"Date",
+	"KeyGroup",
+	"KeyGroupArray",
+	",",
+	":",
+	"$",
+	"*",
+	"?",
+	".",
+	"..",
+	"EOL",
+}
+
+type token struct {
+	Position
+	typ tokenType
+	val string
+}
+
+func (tt tokenType) String() string {
+	idx := int(tt)
+	if idx < len(tokenTypeNames) {
+		return tokenTypeNames[idx]
+	}
+	return "Unknown"
+}
+
+func (t token) Int() int {
+	if result, err := strconv.Atoi(t.val); err != nil {
+		panic(err)
+	} else {
+		return result
+	}
+}
+
+func (t token) String() string {
+	switch t.typ {
+	case tokenEOF:
+		return "EOF"
+	case tokenError:
+		return t.val
+	}
+
+	return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+	return unicode.IsLetter(r) || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+	// Keys start with the first character that isn't whitespace or [ and end
+	// with the last non-whitespace character before the equals sign. Keys
+	// cannot contain a # character.
+	return !(r == '\r' || r == '\n' || r == eof || r == '=')
+}
+
+func isKeyStartChar(r rune) bool {
+	return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
+}
+
+func isDigit(r rune) bool {
+	return unicode.IsNumber(r)
+}
+
+func isHexDigit(r rune) bool {
+	return isDigit(r) ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
diff --git a/vendor/github.com/pelletier/go-toml/token_test.go b/vendor/github.com/pelletier/go-toml/token_test.go
new file mode 100644
index 000000000..20b560d51
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token_test.go
@@ -0,0 +1,67 @@
+package toml
+
+import "testing"
+
+func TestTokenStringer(t *testing.T) {
+	var tests = []struct {
+		tt     tokenType
+		expect string
+	}{
+		{tokenError, "Error"},
+		{tokenEOF, "EOF"},
+		{tokenComment, "Comment"},
+		{tokenKey, "Key"},
+		{tokenString, "String"},
+		{tokenInteger, "Integer"},
+		{tokenTrue, "True"},
+		{tokenFalse, "False"},
+		{tokenFloat, "Float"},
+		{tokenEqual, "="},
+		{tokenLeftBracket, "["},
+		{tokenRightBracket, "]"},
+		{tokenLeftCurlyBrace, "{"},
+		{tokenRightCurlyBrace, "}"},
+		{tokenLeftParen, "("},
+		{tokenRightParen, ")"},
+		{tokenDoubleLeftBracket, "]]"},
+		{tokenDoubleRightBracket, "[["},
+		{tokenDate, "Date"},
+		{tokenKeyGroup, "KeyGroup"},
+		{tokenKeyGroupArray, "KeyGroupArray"},
+		{tokenComma, ","},
+		{tokenColon, ":"},
+		{tokenDollar, "$"},
+		{tokenStar, "*"},
+		{tokenQuestion, "?"},
+		{tokenDot, "."},
+		{tokenDotDot, ".."},
+		{tokenEOL, "EOL"},
+		{tokenEOL + 1, "Unknown"},
+	}
+
+	for i, test := range tests {
+		got := test.tt.String()
+		if got != test.expect {
+			t.Errorf("[%d] invalid string of token type; got %q, expected %q", i, got, test.expect)
+		}
+	}
+}
+
+func TestTokenString(t *testing.T) {
+	var tests = []struct {
+		tok    token
+		expect string
+	}{
+		{token{Position{1, 1}, tokenEOF, ""}, "EOF"},
+		{token{Position{1, 1}, tokenError, "Δt"}, "Δt"},
+		{token{Position{1, 1}, tokenString, "bar"}, `"bar"`},
+		{token{Position{1, 1}, tokenString, "123456789012345"}, `"123456789012345"`},
+	}
+
+	for i, test := range tests {
+		got := test.tok.String()
+		if got != test.expect {
+			t.Errorf("[%d] invalid string of token; got %q, expected %q", i, got, test.expect)
+		}
+	}
+}
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
new file mode 100644
index 000000000..05493a444
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -0,0 +1,309 @@
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"strings"
+)
+
+type tomlValue struct {
+	value     interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
+	comment   string
+	commented bool
+	position  Position
+}
+
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+	values    map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
+	comment   string
+	commented bool
+	position  Position
+}
+
+func newTree() *Tree {
+	return &Tree{
+		values:   make(map[string]interface{}),
+		position: Position{},
+	}
+}
+
+// TreeFromMap initializes a new Tree object using the given map.
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
+	result, err := toTree(m)
+	if err != nil {
+		return nil, err
+	}
+	return result.(*Tree), nil
+}
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+	return t.position
+}
+
+// Has returns a boolean indicating if the given key exists.
+func (t *Tree) Has(key string) bool {
+	if key == "" {
+		return false
+	}
+	return t.HasPath(strings.Split(key, "."))
+}
+
+// HasPath returns true if the given path of keys exists, false otherwise.
+func (t *Tree) HasPath(keys []string) bool {
+	return t.GetPath(keys) != nil
+}
+
+// Keys returns the keys of the top-level tree (does not recurse).
+func (t *Tree) Keys() []string {
+	keys := make([]string, len(t.values))
+	i := 0
+	for k := range t.values {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// Get returns the value at key in the Tree.
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// If you need to retrieve non-bare keys, use GetPath.
+// Returns nil if the path does not exist in the tree.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) Get(key string) interface{} {
+	if key == "" {
+		return t
+	}
+	return t.GetPath(strings.Split(key, "."))
+}
+
+// GetPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPath(keys []string) interface{} {
+	if len(keys) == 0 {
+		return t
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return nil
+		}
+		switch node := value.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				return nil
+			}
+			subtree = node[len(node)-1]
+		default:
+			return nil // cannot navigate through other node types
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.value
+	default:
+		return node
+	}
+}
+
+// GetPosition returns the position of the given key.
+func (t *Tree) GetPosition(key string) Position {
+	if key == "" {
+		return t.position
+	}
+	return t.GetPositionPath(strings.Split(key, "."))
+}
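Because Get splits its argument on every dot, a key that itself contains a dot is only reachable through GetPath, exactly as the Get doc comment above advises. A small illustrative sketch (the quoted-key TOML and values are invented, and assumed to parse as shown):

	package main

	import (
		"fmt"

		"github.com/pelletier/go-toml"
	)

	func main() {
		tree, _ := toml.Load("[\"a.b\"]\nc = 1") // table whose quoted name contains a dot
		fmt.Println(tree.Get("a.b.c"))                  // <nil>: split into "a", "b", "c"
		fmt.Println(tree.GetPath([]string{"a.b", "c"})) // 1: "a.b" is kept as a single key
	}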
+// GetPositionPath returns the position of the element in the tree indicated
+// by 'keys'. If keys is of length zero, the position of the current tree is
+// returned.
+func (t *Tree) GetPositionPath(keys []string) Position {
+	if len(keys) == 0 {
+		return t.position
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return Position{0, 0}
+		}
+		switch node := value.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				return Position{0, 0}
+			}
+			subtree = node[len(node)-1]
+		default:
+			return Position{0, 0}
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.position
+	case *Tree:
+		return node.position
+	case []*Tree:
+		// go to most recent element
+		if len(node) == 0 {
+			return Position{0, 0}
+		}
+		return node[len(node)-1].position
+	default:
+		return Position{0, 0}
+	}
+}
+
+// GetDefault works like Get but with a default value
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
+	val := t.Get(key)
+	if val == nil {
+		return def
+	}
+	return val
+}
+
+// Set sets an element in the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) Set(key string, value interface{}) {
+	t.SetWithComment(key, "", false, value)
+}
+
+// SetWithComment is the same as Set, but allows you to provide comment
+// information to the key, which will be reused by Marshal().
+func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
+	t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
+}
+
+// SetPath sets an element in the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, value interface{}) {
+	t.SetPathWithComment(keys, "", false, value)
+}
+
+// SetPathWithComment is the same as SetPath, but allows you to provide comment
+// information to the key, which will be reused by Marshal().
+func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			nextTree = newTree()
+			subtree.values[intermediateKey] = nextTree // add new element here
+		}
+		switch node := nextTree.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				// create element if it does not exist
+				subtree.values[intermediateKey] = append(node, newTree())
+			}
+			subtree = node[len(node)-1]
+		}
+	}
+
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *Tree:
+		tt := value.(*Tree)
+		tt.comment = comment
+		toInsert = value
+	case []*Tree:
+		toInsert = value
+	case *tomlValue:
+		tt := value.(*tomlValue)
+		tt.comment = comment
+		toInsert = tt
+	default:
+		toInsert = &tomlValue{value: value, comment: comment, commented: commented}
+	}
+
+	subtree.values[keys[len(keys)-1]] = toInsert
+}
+
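On the setter side, Set likewise splits the key on dots and creates any missing intermediate trees, so a single call can build a nested table. A brief sketch (invented names and values, not taken from the vendored files):

	package main

	import (
		"fmt"

		"github.com/pelletier/go-toml"
	)

	func main() {
		tree, _ := toml.TreeFromMap(map[string]interface{}{})
		tree.Set("database.server.port", int64(5432)) // creates [database.server] on the fly
		fmt.Println(tree.Get("database.server.port")) // 5432
		fmt.Println(tree.Has("database.server"))      // true
	}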
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for _, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTree() + tree.position = pos + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. +func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = errors.New(r.(string)) + } + }() + tree = parseToml(lexToml(b)) + return +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. +func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/toml_test.go b/vendor/github.com/pelletier/go-toml/toml_test.go new file mode 100644 index 000000000..ab9c24253 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml_test.go @@ -0,0 +1,106 @@ +// Testing support for go-toml + +package toml + +import ( + "testing" +) + +func TestTomlHas(t *testing.T) { + tree, _ := Load(` + [test] + key = "value" + `) + + if !tree.Has("test.key") { + t.Errorf("Has - expected test.key to exists") + } + + if tree.Has("") { + t.Errorf("Should return false if the key is not provided") + } +} + +func TestTomlGet(t *testing.T) { + tree, _ := Load(` + [test] + key = "value" + `) + + if tree.Get("") != tree { + t.Errorf("Get should return the tree itself when given an empty path") + } + + if tree.Get("test.key") != "value" { + t.Errorf("Get should return the value") + } + if tree.Get(`\`) != nil { + t.Errorf("should return nil when the key is malformed") + } +} + +func TestTomlGetDefault(t *testing.T) { + tree, _ := Load(` + [test] + key = "value" + `) + + if tree.GetDefault("", "hello") != tree { + t.Error("GetDefault should return the tree itself when given an empty path") + } + + if tree.GetDefault("test.key", "hello") != "value" { + t.Error("Get should return the value") + } + + if tree.GetDefault("whatever", "hello") != "hello" { + t.Error("GetDefault should return the default value if the key does not exist") + } +} + +func TestTomlHasPath(t *testing.T) { + tree, _ := Load(` + [test] + key = "value" + `) + + if !tree.HasPath([]string{"test", "key"}) { + t.Errorf("HasPath - expected test.key to exists") + } +} + +func TestTomlGetPath(t *testing.T) { + node := newTree() + //TODO: set other node data + + for idx, item := range []struct { + Path []string + Expected *Tree + }{ + { // empty path test + []string{}, + node, + }, + } { + result := node.GetPath(item.Path) + if result != item.Expected { + t.Errorf("GetPath[%d] %v - expected %v, got %v 
instead.", idx, item.Path, item.Expected, result) + } + } + + tree, _ := Load("[foo.bar]\na=1\nb=2\n[baz.foo]\na=3\nb=4\n[gorf.foo]\na=5\nb=6") + if tree.GetPath([]string{"whatever"}) != nil { + t.Error("GetPath should return nil when the key does not exist") + } +} + +func TestTomlFromMap(t *testing.T) { + simpleMap := map[string]interface{}{"hello": 42} + tree, err := TreeFromMap(simpleMap) + if err != nil { + t.Fatal("unexpected error:", err) + } + if tree.Get("hello") != int64(42) { + t.Fatal("hello should be 42, not", tree.Get("hello")) + } +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 000000000..79610e9b3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,142 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. +// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. 
+ + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) + } + + simpleValue, err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create_test.go b/vendor/github.com/pelletier/go-toml/tomltree_create_test.go new file mode 100644 index 000000000..3465a1066 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create_test.go @@ -0,0 +1,126 @@ +package toml + +import ( + "strconv" + "testing" + "time" +) + +type customString string + +type stringer struct{} + +func (s stringer) String() string { + return "stringer" +} + +func validate(t *testing.T, path string, object interface{}) { + switch o := object.(type) { + case *Tree: + for key, tree := range o.values { + validate(t, path+"."+key, tree) + } + case []*Tree: + for index, tree := range o { + validate(t, path+"."+strconv.Itoa(index), tree) + } + case *tomlValue: + switch o.value.(type) { + case int64, uint64, bool, string, float64, time.Time, + []int64, []uint64, []bool, []string, []float64, []time.Time: + default: + t.Fatalf("tomlValue at key %s containing incorrect type %T", path, o.value) + } + default: + t.Fatalf("value at key %s is of incorrect type %T", path, object) + } + t.Logf("validation ok %s as %T", path, object) +} + +func validateTree(t *testing.T, tree *Tree) { + validate(t, "", tree) +} + +func TestTreeCreateToTree(t *testing.T) { + data := map[string]interface{}{ + "a_string": "bar", + "an_int": 42, + "time": time.Now(), + "int8": int8(2), + "int16": int16(2), + "int32": int32(2), + "uint8": uint8(2), + "uint16": uint16(2), + "uint32": uint32(2), + "float32": float32(2), + "a_bool": false, + 
"stringer": stringer{}, + "nested": map[string]interface{}{ + "foo": "bar", + }, + "array": []string{"a", "b", "c"}, + "array_uint": []uint{uint(1), uint(2)}, + "array_table": []map[string]interface{}{{"sub_map": 52}}, + "array_times": []time.Time{time.Now(), time.Now()}, + "map_times": map[string]time.Time{"now": time.Now()}, + "custom_string_map_key": map[customString]interface{}{customString("custom"): "custom"}, + } + tree, err := TreeFromMap(data) + if err != nil { + t.Fatal("unexpected error:", err) + } + validateTree(t, tree) +} + +func TestTreeCreateToTreeInvalidLeafType(t *testing.T) { + _, err := TreeFromMap(map[string]interface{}{"foo": t}) + expected := "cannot convert type *testing.T to Tree" + if err.Error() != expected { + t.Fatalf("expected error %s, got %s", expected, err.Error()) + } +} + +func TestTreeCreateToTreeInvalidMapKeyType(t *testing.T) { + _, err := TreeFromMap(map[string]interface{}{"foo": map[int]interface{}{2: 1}}) + expected := "map key needs to be a string, not int (int)" + if err.Error() != expected { + t.Fatalf("expected error %s, got %s", expected, err.Error()) + } +} + +func TestTreeCreateToTreeInvalidArrayMemberType(t *testing.T) { + _, err := TreeFromMap(map[string]interface{}{"foo": []*testing.T{t}}) + expected := "cannot convert type *testing.T to Tree" + if err.Error() != expected { + t.Fatalf("expected error %s, got %s", expected, err.Error()) + } +} + +func TestTreeCreateToTreeInvalidTableGroupType(t *testing.T) { + _, err := TreeFromMap(map[string]interface{}{"foo": []map[string]interface{}{{"hello": t}}}) + expected := "cannot convert type *testing.T to Tree" + if err.Error() != expected { + t.Fatalf("expected error %s, got %s", expected, err.Error()) + } +} + +func TestRoundTripArrayOfTables(t *testing.T) { + orig := "\n[[stuff]]\n name = \"foo\"\n things = [\"a\",\"b\"]\n" + tree, err := Load(orig) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + m := tree.ToMap() + + tree, err = TreeFromMap(m) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + want := orig + got := tree.String() + + if got != want { + t.Errorf("want:\n%s\ngot:\n%s", want, got) + } +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 000000000..d322a9764 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 +1,289 @@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Ensure a round float does contain a decimal point. Otherwise feeding + // the output back to the parser would convert to an integer. 
+ if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil + case string: + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for i, value := range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(value) + if i != len(values)-1 { + stringBuffer.WriteString(`,`) + } + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ",") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + simpleValuesKeys := make([]string, 0) + complexValuesKeys := make([]string, 0) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + complexValuesKeys = append(complexValuesKeys, k) + default: + simpleValuesKeys = append(simpleValuesKeys, k) + } + } + + sort.Strings(simpleValuesKeys) + sort.Strings(complexValuesKeys) + + for _, k := range simpleValuesKeys { + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + repr, err := tomlValueStringRepresentation(v.value, indent, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if v.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + + for _, k := range complexValuesKeys { + v := t.values[k] + + combinedKey := k + if keyspace != "" { + combinedKey = keyspace + "." 
+ combinedKey + } + var commented string + if t.commented { + commented = "# " + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + } + } + } + + return bytesCount, nil +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encode the Tree as Toml and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. +func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + var buf bytes.Buffer + _, err := t.WriteTo(&buf) + if err != nil { + return "", err + } + return buf.String(), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures. 
+// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = node.value + } + } + return result +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write_test.go b/vendor/github.com/pelletier/go-toml/tomltree_write_test.go new file mode 100644 index 000000000..206203b88 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write_test.go @@ -0,0 +1,376 @@ +package toml + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" +) + +type failingWriter struct { + failAt int + written int + buffer bytes.Buffer +} + +func (f *failingWriter) Write(p []byte) (n int, err error) { + count := len(p) + toWrite := f.failAt - (count + f.written) + if toWrite < 0 { + toWrite = 0 + } + if toWrite > count { + f.written += count + f.buffer.Write(p) + return count, nil + } + + f.buffer.Write(p[:toWrite]) + f.written = f.failAt + return toWrite, fmt.Errorf("failingWriter failed after writing %d bytes", f.written) +} + +func assertErrorString(t *testing.T, expected string, err error) { + expectedErr := errors.New(expected) + if err == nil || err.Error() != expectedErr.Error() { + t.Errorf("expecting error %s, but got %s instead", expected, err) + } +} + +func TestTreeWriteToEmptyTable(t *testing.T) { + doc := `[[empty-tables]] +[[empty-tables]]` + + toml, err := Load(doc) + if err != nil { + t.Fatal("Unexpected Load error:", err) + } + tomlString, err := toml.ToTomlString() + if err != nil { + t.Fatal("Unexpected ToTomlString error:", err) + } + + expected := ` +[[empty-tables]] + +[[empty-tables]] +` + + if tomlString != expected { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, tomlString) + } +} + +func TestTreeWriteToTomlString(t *testing.T) { + toml, err := Load(`name = { first = "Tom", last = "Preston-Werner" } +points = { x = 1, y = 2 }`) + + if err != nil { + t.Fatal("Unexpected error:", err) + } + + tomlString, _ := toml.ToTomlString() + reparsedTree, err := Load(tomlString) + + assertTree(t, reparsedTree, err, map[string]interface{}{ + "name": map[string]interface{}{ + "first": "Tom", + "last": "Preston-Werner", + }, + "points": map[string]interface{}{ + "x": int64(1), + "y": int64(2), + }, + }) +} + +func TestTreeWriteToTomlStringSimple(t *testing.T) { + tree, err := Load("[foo]\n\n[[foo.bar]]\na = 42\n\n[[foo.bar]]\na = 69\n") + if err != nil { + t.Errorf("Test failed to parse: %v", err) + return + } + result, err := tree.ToTomlString() + if err != nil { + t.Errorf("Unexpected error: %s", err) + } + expected := "\n[foo]\n\n [[foo.bar]]\n a = 42\n\n [[foo.bar]]\n a = 69\n" + if result != expected { + t.Errorf("Expected got '%s', expected '%s'", result, expected) + } +} + +func TestTreeWriteToTomlStringKeysOrders(t *testing.T) { + for i := 0; i < 100; i++ { + tree, _ := Load(` + foobar = true + bar = "baz" + foo = 1 + [qux] + foo = 1 + bar = "baz2"`) + + stringRepr, _ := tree.ToTomlString() + + t.Log("Intermediate string representation:") + t.Log(stringRepr) + + r := strings.NewReader(stringRepr) + toml, err := 
LoadReader(r) + + if err != nil { + t.Fatal("Unexpected error:", err) + } + + assertTree(t, toml, err, map[string]interface{}{ + "foobar": true, + "bar": "baz", + "foo": 1, + "qux": map[string]interface{}{ + "foo": 1, + "bar": "baz2", + }, + }) + } +} + +func testMaps(t *testing.T, actual, expected map[string]interface{}) { + if !reflect.DeepEqual(actual, expected) { + t.Fatal("trees aren't equal.\n", "Expected:\n", expected, "\nActual:\n", actual) + } +} + +func TestTreeWriteToMapSimple(t *testing.T) { + tree, _ := Load("a = 42\nb = 17") + + expected := map[string]interface{}{ + "a": int64(42), + "b": int64(17), + } + + testMaps(t, tree.ToMap(), expected) +} + +func TestTreeWriteToInvalidTreeSimpleValue(t *testing.T) { + tree := Tree{values: map[string]interface{}{"foo": int8(1)}} + _, err := tree.ToTomlString() + assertErrorString(t, "invalid value type at foo: int8", err) +} + +func TestTreeWriteToInvalidTreeTomlValue(t *testing.T) { + tree := Tree{values: map[string]interface{}{"foo": &tomlValue{value: int8(1), comment: "", position: Position{}}}} + _, err := tree.ToTomlString() + assertErrorString(t, "unsupported value type int8: 1", err) +} + +func TestTreeWriteToInvalidTreeTomlValueArray(t *testing.T) { + tree := Tree{values: map[string]interface{}{"foo": &tomlValue{value: int8(1), comment: "", position: Position{}}}} + _, err := tree.ToTomlString() + assertErrorString(t, "unsupported value type int8: 1", err) +} + +func TestTreeWriteToFailingWriterInSimpleValue(t *testing.T) { + toml, _ := Load(`a = 2`) + writer := failingWriter{failAt: 0, written: 0} + _, err := toml.WriteTo(&writer) + assertErrorString(t, "failingWriter failed after writing 0 bytes", err) +} + +func TestTreeWriteToFailingWriterInTable(t *testing.T) { + toml, _ := Load(` +[b] +a = 2`) + writer := failingWriter{failAt: 2, written: 0} + _, err := toml.WriteTo(&writer) + assertErrorString(t, "failingWriter failed after writing 2 bytes", err) + + writer = failingWriter{failAt: 13, written: 0} + _, err = toml.WriteTo(&writer) + assertErrorString(t, "failingWriter failed after writing 13 bytes", err) +} + +func TestTreeWriteToFailingWriterInArray(t *testing.T) { + toml, _ := Load(` +[[b]] +a = 2`) + writer := failingWriter{failAt: 2, written: 0} + _, err := toml.WriteTo(&writer) + assertErrorString(t, "failingWriter failed after writing 2 bytes", err) + + writer = failingWriter{failAt: 15, written: 0} + _, err = toml.WriteTo(&writer) + assertErrorString(t, "failingWriter failed after writing 15 bytes", err) +} + +func TestTreeWriteToMapExampleFile(t *testing.T) { + tree, _ := LoadFile("example.toml") + expected := map[string]interface{}{ + "title": "TOML Example", + "owner": map[string]interface{}{ + "name": "Tom Preston-Werner", + "organization": "GitHub", + "bio": "GitHub Cofounder & CEO\nLikes tater tots and beer.", + "dob": time.Date(1979, time.May, 27, 7, 32, 0, 0, time.UTC), + }, + "database": map[string]interface{}{ + "server": "192.168.1.1", + "ports": []interface{}{int64(8001), int64(8001), int64(8002)}, + "connection_max": int64(5000), + "enabled": true, + }, + "servers": map[string]interface{}{ + "alpha": map[string]interface{}{ + "ip": "10.0.0.1", + "dc": "eqdc10", + }, + "beta": map[string]interface{}{ + "ip": "10.0.0.2", + "dc": "eqdc10", + }, + }, + "clients": map[string]interface{}{ + "data": []interface{}{ + []interface{}{"gamma", "delta"}, + []interface{}{int64(1), int64(2)}, + }, + }, + } + testMaps(t, tree.ToMap(), expected) +} + +func TestTreeWriteToMapWithTablesInMultipleChunks(t *testing.T) { + 
tree, _ := Load(` + [[menu.main]] + a = "menu 1" + b = "menu 2" + [[menu.main]] + c = "menu 3" + d = "menu 4"`) + expected := map[string]interface{}{ + "menu": map[string]interface{}{ + "main": []interface{}{ + map[string]interface{}{"a": "menu 1", "b": "menu 2"}, + map[string]interface{}{"c": "menu 3", "d": "menu 4"}, + }, + }, + } + treeMap := tree.ToMap() + + testMaps(t, treeMap, expected) +} + +func TestTreeWriteToMapWithArrayOfInlineTables(t *testing.T) { + tree, _ := Load(` + [params] + language_tabs = [ + { key = "shell", name = "Shell" }, + { key = "ruby", name = "Ruby" }, + { key = "python", name = "Python" } + ]`) + + expected := map[string]interface{}{ + "params": map[string]interface{}{ + "language_tabs": []interface{}{ + map[string]interface{}{ + "key": "shell", + "name": "Shell", + }, + map[string]interface{}{ + "key": "ruby", + "name": "Ruby", + }, + map[string]interface{}{ + "key": "python", + "name": "Python", + }, + }, + }, + } + + treeMap := tree.ToMap() + testMaps(t, treeMap, expected) +} + +func TestTreeWriteToFloat(t *testing.T) { + tree, err := Load(`a = 3.0`) + if err != nil { + t.Fatal(err) + } + str, err := tree.ToTomlString() + if err != nil { + t.Fatal(err) + } + expected := `a = 3.0` + if strings.TrimSpace(str) != strings.TrimSpace(expected) { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, str) + } +} + +func TestTreeWriteToSpecialFloat(t *testing.T) { + expected := `a = +inf +b = -inf +c = nan` + + tree, err := Load(expected) + if err != nil { + t.Fatal(err) + } + str, err := tree.ToTomlString() + if err != nil { + t.Fatal(err) + } + if strings.TrimSpace(str) != strings.TrimSpace(expected) { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, str) + } +} + +func BenchmarkTreeToTomlString(b *testing.B) { + toml, err := Load(sampleHard) + if err != nil { + b.Fatal("Unexpected error:", err) + } + + for i := 0; i < b.N; i++ { + _, err := toml.ToTomlString() + if err != nil { + b.Fatal(err) + } + } +} + +var sampleHard = `# Test file for TOML +# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate +# This part you'll really hate + +[the] +test_string = "You'll hate me after this - #" # " Annoying, isn't it? + + [the.hard] + test_array = [ "] ", " # "] # ] There you go, parse this! + test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ] + # You didn't think it'd as easy as chucking out the last #, did you? + another_test_string = " Same thing, but with a string #" + harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too" + # Things will get harder + + [the.hard."bit#"] + "what?" = "You don't think some user won't do that?" + multi_line_array = [ + "]", + # ] Oh yes I did + ] + +# Each of the following keygroups/key value pairs should produce an error. 
Uncomment to them to test + +#[error] if you didn't catch this, your parser is broken +#string = "Anything other than tabs, spaces and newline after a keygroup or key value pair has ended should produce an error unless it is a comment" like this +#array = [ +# "This might most likely happen in multiline arrays", +# Like here, +# "or here, +# and here" +# ] End of array comment, forgot the # +#number = 3.14 pi <--again forgot the # ` diff --git a/vendor/github.com/pmezard/go-difflib/.travis.yml b/vendor/github.com/pmezard/go-difflib/.travis.yml new file mode 100644 index 000000000..90c9c6f91 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.5 + - tip + diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 000000000..c67dad612 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/README.md b/vendor/github.com/pmezard/go-difflib/README.md new file mode 100644 index 000000000..e87f307ed --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/README.md @@ -0,0 +1,50 @@ +go-difflib +========== + +[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib) +[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib) + +Go-difflib is a partial port of python 3 difflib package. Its main goal +was to make unified and context diff available in pure Go, mostly for +testing purposes. + +The following class and functions (and related tests) have be ported: + +* `SequenceMatcher` +* `unified_diff()` +* `context_diff()` + +## Installation + +```bash +$ go get github.com/pmezard/go-difflib/difflib +``` + +### Quick Start + +Diffs are configured with Unified (or ContextDiff) structures, and can +be output to an io.Writer or returned as a string. 
+ +```Go +diff := UnifiedDiff{ + A: difflib.SplitLines("foo\nbar\n"), + B: difflib.SplitLines("foo\nbaz\n"), + FromFile: "Original", + ToFile: "Current", + Context: 3, +} +text, _ := GetUnifiedDiffString(diff) +fmt.Printf(text) +``` + +would output: + +``` +--- Original ++++ Current +@@ -1,3 +1,3 @@ + foo +-bar ++baz +``` + diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 000000000..003e99fad --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
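+// For illustration, a minimal sketch with hypothetical sequences:
+//
+//     m := NewMatcher([]string{"a", "b", "x"}, []string{"a", "b", "y"})
+//     m.GetMatchingBlocks() // [{A:0 B:0 Size:2} {A:3 B:3 Size:0}]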
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
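+// In this port the groups are returned eagerly as a [][]OpCode rather than as
+// a generator, and n < 0 selects the default of three context lines. A
+// minimal sketch with hypothetical inputs:
+//
+//     m := NewMatcher(SplitLines("a\nb\nc\n"), SplitLines("a\nx\nc\n"))
+//     groups := m.GetGroupedOpCodes(-1) // one group of 'e', 'r', 'e' opcodes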
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
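+// For illustration (the values mirror the package tests):
+//
+//     m := NewMatcher([]string{"a", "b", "c", "d"}, []string{"b", "c", "d", "e"})
+//     m.Ratio()          // 0.75
+//     m.QuickRatio()     // 0.75
+//     m.RealQuickRatio() // 1.0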
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
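+// For illustration, a minimal sketch (the file names are placeholders):
+//
+//     diff := UnifiedDiff{
+//         A:        SplitLines("foo\nbar\n"),
+//         B:        SplitLines("foo\nbaz\n"),
+//         FromFile: "Original",
+//         ToFile:   "Current",
+//         Context:  3,
+//     }
+//     text, _ := GetUnifiedDiffString(diff) // or stream with WriteUnifiedDiff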
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. 
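+// For illustration, a minimal sketch (the file names are placeholders):
+//
+//     diff := ContextDiff{
+//         A:        SplitLines("one\ntwo\n"),
+//         B:        SplitLines("one\n2\n"),
+//         FromFile: "Original",
+//         ToFile:   "Current",
+//         Context:  3,
+//         Eol:      "\n",
+//     }
+//     text, _ := GetContextDiffString(diff)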
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + wf := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + } + + first, last := g[0], g[len(g)-1] + ws("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + wf("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + wf("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. 
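+// For example:
+//
+//     SplitLines("one\ntwo") // []string{"one\n", "two\n"}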
+func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go new file mode 100644 index 000000000..d72511962 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go @@ -0,0 +1,426 @@ +package difflib + +import ( + "bytes" + "fmt" + "math" + "reflect" + "strings" + "testing" +) + +func assertAlmostEqual(t *testing.T, a, b float64, places int) { + if math.Abs(a-b) > math.Pow10(-places) { + t.Errorf("%.7f != %.7f", a, b) + } +} + +func assertEqual(t *testing.T, a, b interface{}) { + if !reflect.DeepEqual(a, b) { + t.Errorf("%v != %v", a, b) + } +} + +func splitChars(s string) []string { + chars := make([]string, 0, len(s)) + // Assume ASCII inputs + for i := 0; i != len(s); i++ { + chars = append(chars, string(s[i])) + } + return chars +} + +func TestSequenceMatcherRatio(t *testing.T) { + s := NewMatcher(splitChars("abcd"), splitChars("bcde")) + assertEqual(t, s.Ratio(), 0.75) + assertEqual(t, s.QuickRatio(), 0.75) + assertEqual(t, s.RealQuickRatio(), 1.0) +} + +func TestGetOptCodes(t *testing.T) { + a := "qabxcd" + b := "abycdf" + s := NewMatcher(splitChars(a), splitChars(b)) + w := &bytes.Buffer{} + for _, op := range s.GetOpCodes() { + fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag), + op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2]) + } + result := string(w.Bytes()) + expected := `d a[0:1], (q) b[0:0] () +e a[1:3], (ab) b[0:2] (ab) +r a[3:4], (x) b[2:3] (y) +e a[4:6], (cd) b[3:5] (cd) +i a[6:6], () b[5:6] (f) +` + if expected != result { + t.Errorf("unexpected op codes: \n%s", result) + } +} + +func TestGroupedOpCodes(t *testing.T) { + a := []string{} + for i := 0; i != 39; i++ { + a = append(a, fmt.Sprintf("%02d", i)) + } + b := []string{} + b = append(b, a[:8]...) + b = append(b, " i") + b = append(b, a[8:19]...) + b = append(b, " x") + b = append(b, a[20:22]...) + b = append(b, a[27:34]...) + b = append(b, " y") + b = append(b, a[35:]...) 
+ s := NewMatcher(a, b) + w := &bytes.Buffer{} + for _, g := range s.GetGroupedOpCodes(-1) { + fmt.Fprintf(w, "group\n") + for _, op := range g { + fmt.Fprintf(w, " %s, %d, %d, %d, %d\n", string(op.Tag), + op.I1, op.I2, op.J1, op.J2) + } + } + result := string(w.Bytes()) + expected := `group + e, 5, 8, 5, 8 + i, 8, 8, 8, 9 + e, 8, 11, 9, 12 +group + e, 16, 19, 17, 20 + r, 19, 20, 20, 21 + e, 20, 22, 21, 23 + d, 22, 27, 23, 23 + e, 27, 30, 23, 26 +group + e, 31, 34, 27, 30 + r, 34, 35, 30, 31 + e, 35, 38, 31, 34 +` + if expected != result { + t.Errorf("unexpected op codes: \n%s", result) + } +} + +func ExampleGetUnifiedDiffCode() { + a := `one +two +three +four +fmt.Printf("%s,%T",a,b)` + b := `zero +one +three +four` + diff := UnifiedDiff{ + A: SplitLines(a), + B: SplitLines(b), + FromFile: "Original", + FromDate: "2005-01-26 23:30:50", + ToFile: "Current", + ToDate: "2010-04-02 10:20:52", + Context: 3, + } + result, _ := GetUnifiedDiffString(diff) + fmt.Println(strings.Replace(result, "\t", " ", -1)) + // Output: + // --- Original 2005-01-26 23:30:50 + // +++ Current 2010-04-02 10:20:52 + // @@ -1,5 +1,4 @@ + // +zero + // one + // -two + // three + // four + // -fmt.Printf("%s,%T",a,b) +} + +func ExampleGetContextDiffCode() { + a := `one +two +three +four +fmt.Printf("%s,%T",a,b)` + b := `zero +one +tree +four` + diff := ContextDiff{ + A: SplitLines(a), + B: SplitLines(b), + FromFile: "Original", + ToFile: "Current", + Context: 3, + Eol: "\n", + } + result, _ := GetContextDiffString(diff) + fmt.Print(strings.Replace(result, "\t", " ", -1)) + // Output: + // *** Original + // --- Current + // *************** + // *** 1,5 **** + // one + // ! two + // ! three + // four + // - fmt.Printf("%s,%T",a,b) + // --- 1,4 ---- + // + zero + // one + // ! tree + // four +} + +func ExampleGetContextDiffString() { + a := `one +two +three +four` + b := `zero +one +tree +four` + diff := ContextDiff{ + A: SplitLines(a), + B: SplitLines(b), + FromFile: "Original", + ToFile: "Current", + Context: 3, + Eol: "\n", + } + result, _ := GetContextDiffString(diff) + fmt.Printf(strings.Replace(result, "\t", " ", -1)) + // Output: + // *** Original + // --- Current + // *************** + // *** 1,4 **** + // one + // ! two + // ! three + // four + // --- 1,4 ---- + // + zero + // one + // ! 
tree
+	//   four
+}
+
+func rep(s string, count int) string {
+	return strings.Repeat(s, count)
+}
+
+func TestWithAsciiOneInsert(t *testing.T) {
+	sm := NewMatcher(splitChars(rep("b", 100)),
+		splitChars("a"+rep("b", 100)))
+	assertAlmostEqual(t, sm.Ratio(), 0.995, 3)
+	assertEqual(t, sm.GetOpCodes(),
+		[]OpCode{{'i', 0, 0, 0, 1}, {'e', 0, 100, 1, 101}})
+	assertEqual(t, len(sm.bPopular), 0)
+
+	sm = NewMatcher(splitChars(rep("b", 100)),
+		splitChars(rep("b", 50)+"a"+rep("b", 50)))
+	assertAlmostEqual(t, sm.Ratio(), 0.995, 3)
+	assertEqual(t, sm.GetOpCodes(),
+		[]OpCode{{'e', 0, 50, 0, 50}, {'i', 50, 50, 50, 51}, {'e', 50, 100, 51, 101}})
+	assertEqual(t, len(sm.bPopular), 0)
+}
+
+func TestWithAsciiOnDelete(t *testing.T) {
+	sm := NewMatcher(splitChars(rep("a", 40)+"c"+rep("b", 40)),
+		splitChars(rep("a", 40)+rep("b", 40)))
+	assertAlmostEqual(t, sm.Ratio(), 0.994, 3)
+	assertEqual(t, sm.GetOpCodes(),
+		[]OpCode{{'e', 0, 40, 0, 40}, {'d', 40, 41, 40, 40}, {'e', 41, 81, 40, 80}})
+}
+
+func TestWithAsciiBJunk(t *testing.T) {
+	isJunk := func(s string) bool {
+		return s == " "
+	}
+	sm := NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
+		splitChars(rep("a", 44)+rep("b", 40)), true, isJunk)
+	assertEqual(t, sm.bJunk, map[string]struct{}{})
+
+	sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
+		splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
+	assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}})
+
+	isJunk = func(s string) bool {
+		return s == " " || s == "b"
+	}
+	sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
+		splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
+	assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}})
+}
+
+func TestSFBugsRatioForNullSeqn(t *testing.T) {
+	sm := NewMatcher(nil, nil)
+	assertEqual(t, sm.Ratio(), 1.0)
+	assertEqual(t, sm.QuickRatio(), 1.0)
+	assertEqual(t, sm.RealQuickRatio(), 1.0)
+}
+
+func TestSFBugsComparingEmptyLists(t *testing.T) {
+	groups := NewMatcher(nil, nil).GetGroupedOpCodes(-1)
+	assertEqual(t, len(groups), 0)
+	diff := UnifiedDiff{
+		FromFile: "Original",
+		ToFile:   "Current",
+		Context:  3,
+	}
+	result, err := GetUnifiedDiffString(diff)
+	assertEqual(t, err, nil)
+	assertEqual(t, result, "")
+}
+
+func TestOutputFormatRangeFormatUnified(t *testing.T) {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	//
+	// Each <range> field shall be of the form:
+	//   "%1d", <beginning line number> if the range contains exactly one line,
+	// and:
+	//   "%1d,%1d", <beginning line number>, <number of lines> otherwise.
+	// If a range is empty, its beginning line number shall be the number of
+	// the line just before the range, or 0 if the empty range starts the file.
+	fm := formatRangeUnified
+	assertEqual(t, fm(3, 3), "3,0")
+	assertEqual(t, fm(3, 4), "4")
+	assertEqual(t, fm(3, 5), "4,2")
+	assertEqual(t, fm(3, 6), "4,3")
+	assertEqual(t, fm(0, 0), "0,0")
+}
+
+func TestOutputFormatRangeFormatContext(t *testing.T) {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	//
+	// The range of lines in file1 shall be written in the following format
+	// if the range contains two or more lines:
+	//     "*** %d,%d ****\n", <beginning line number>, <ending line number>
+	// and the following format otherwise:
+	//     "*** %d ****\n", <ending line number>
+	// The ending line number of an empty range shall be the number of the preceding line,
+	// or 0 if the range is at the start of the file.
+	//
+	// Next, the range of lines in file2 shall be written in the following format
+	// if the range contains two or more lines:
+	//     "--- %d,%d ----\n", <beginning line number>, <ending line number>
+	// and the following format otherwise:
+	//     "--- %d ----\n", <ending line number>
+	fm := formatRangeContext
+	assertEqual(t, fm(3, 3), "3")
+	assertEqual(t, fm(3, 4), "4")
+	assertEqual(t, fm(3, 5), "4,5")
+	assertEqual(t, fm(3, 6), "4,6")
+	assertEqual(t, fm(0, 0), "0")
+}
+
+func TestOutputFormatTabDelimiter(t *testing.T) {
+	diff := UnifiedDiff{
+		A:        splitChars("one"),
+		B:        splitChars("two"),
+		FromFile: "Original",
+		FromDate: "2005-01-26 23:30:50",
+		ToFile:   "Current",
+		ToDate:   "2010-04-12 10:20:52",
+		Eol:      "\n",
+	}
+	ud, err := GetUnifiedDiffString(diff)
+	assertEqual(t, err, nil)
+	assertEqual(t, SplitLines(ud)[:2], []string{
+		"--- Original\t2005-01-26 23:30:50\n",
+		"+++ Current\t2010-04-12 10:20:52\n",
+	})
+	cd, err := GetContextDiffString(ContextDiff(diff))
+	assertEqual(t, err, nil)
+	assertEqual(t, SplitLines(cd)[:2], []string{
+		"*** Original\t2005-01-26 23:30:50\n",
+		"--- Current\t2010-04-12 10:20:52\n",
+	})
+}
+
+func TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) {
+	diff := UnifiedDiff{
+		A:        splitChars("one"),
+		B:        splitChars("two"),
+		FromFile: "Original",
+		ToFile:   "Current",
+		Eol:      "\n",
+	}
+	ud, err := GetUnifiedDiffString(diff)
+	assertEqual(t, err, nil)
+	assertEqual(t, SplitLines(ud)[:2], []string{"--- Original\n", "+++ Current\n"})
+
+	cd, err := GetContextDiffString(ContextDiff(diff))
+	assertEqual(t, err, nil)
+	assertEqual(t, SplitLines(cd)[:2], []string{"*** Original\n", "--- Current\n"})
+}
+
+func TestOmitFilenames(t *testing.T) {
+	diff := UnifiedDiff{
+		A:   SplitLines("o\nn\ne\n"),
+		B:   SplitLines("t\nw\no\n"),
+		Eol: "\n",
+	}
+	ud, err := GetUnifiedDiffString(diff)
+	assertEqual(t, err, nil)
+	assertEqual(t, SplitLines(ud), []string{
+		"@@ -0,0 +1,2 @@\n",
+		"+t\n",
+		"+w\n",
+		"@@ -2,2 +3,0 @@\n",
+		"-n\n",
+		"-e\n",
+		"\n",
+	})
+
+	cd, err := GetContextDiffString(ContextDiff(diff))
+	assertEqual(t, err, nil)
+	assertEqual(t, SplitLines(cd), []string{
+		"***************\n",
+		"*** 0 ****\n",
+		"--- 1,2 ----\n",
+		"+ t\n",
+		"+ w\n",
+		"***************\n",
+		"*** 2,3 ****\n",
+		"- n\n",
+		"- e\n",
+		"--- 3 ----\n",
+		"\n",
+	})
+}
+
+func TestSplitLines(t *testing.T) {
+	allTests := []struct {
+		input string
+		want  []string
+	}{
+		{"foo", []string{"foo\n"}},
+		{"foo\nbar", []string{"foo\n", "bar\n"}},
+		{"foo\nbar\n", []string{"foo\n", "bar\n", "\n"}},
+	}
+	for _, test := range allTests {
+		assertEqual(t, SplitLines(test.input), test.want)
+	}
+}
+
+func benchmarkSplitLines(b *testing.B, count int) {
+	str := strings.Repeat("foo\n", count)
+
+	b.ResetTimer()
+
+	n := 0
+	for i := 0; i < b.N; i++ {
+		n += len(SplitLines(str))
+	}
+}
+
+func BenchmarkSplitLines100(b *testing.B) {
+	benchmarkSplitLines(b, 100)
+}
+
+func BenchmarkSplitLines10000(b *testing.B) {
+	benchmarkSplitLines(b, 10000)
+}
diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml
new file mode 100644
index 000000000..0637db726
--- /dev/null
+++ b/vendor/github.com/spf13/afero/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+
+go:
+  - 1.9
+  - "1.10"
+  - tip
+
+os:
+  - linux
+  - osx
+
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+
+script:
+  - go build
+  - go test -race -v ./...
+ diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 000000000..298f0e266 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 000000000..0c9b04b53
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,452 @@
+![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+
+A FileSystem Abstraction System for Go
+
+[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem, as an abstraction layer providing
+interfaces, types and methods. Afero has an exceptionally clean interface and
+simple design without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on
+the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package, as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Wrap the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing.
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+    $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a pointer to a filesystem.
+```go
+var AppFs = afero.NewMemMapFs()
+
+or
+
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
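+
+As a minimal, hedged sketch of this swap-in pattern (the `AppFs` name follows
+the example above; the test file and its contents are hypothetical):
+```go
+// app.go
+package app
+
+import "github.com/spf13/afero"
+
+// AppFs is the single filesystem handle the application shares.
+// Production code leaves it as the real OS filesystem; tests may
+// reassign it to an isolated in-memory backend.
+var AppFs afero.Fs = afero.NewOsFs()
+```
+```go
+// app_test.go (hypothetical)
+package app
+
+import (
+	"testing"
+
+	"github.com/spf13/afero"
+)
+
+func TestUsesMemFs(t *testing.T) {
+	AppFs = afero.NewMemMapFs() // blank, isolated filesystem for this test
+	if err := AppFs.MkdirAll("/tmp/data", 0755); err != nil {
+		t.Fatal(err)
+	}
+}
+```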
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if your application previously had:
+```go
+os.Open("/tmp/foo")
+```
+We would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` being the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil, with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero).
+
+They are available under two different approaches. You can either call
+them directly, where the first parameter of each function will be the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs, "", "ioutil-test")
+
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and can be easily
+reproducible regardless of OS. You could create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+	appFS := afero.NewMemMapFs()
+	// create test files and directories
+	appFS.MkdirAll("src/a", 0755)
+	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+	name := "src/c"
+	_, err := appFS.Stat(name)
+	if os.IsNotExist(err) {
+		t.Errorf("file \"%s\" does not exist.\n", name)
+	}
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use, as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem perfect for use in
+mocking and to speed up unnecessary disk io when persistence isn’t
+necessary. It is fully concurrent and will work within go routines
+safely.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (sftp),
+which can be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+The given file name to the operations on this Fs will be prepended with
+the base path before calling the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names: any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The Http package requires a slightly specific version of Open which
+returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs(<ExistingFS>)
+fileserver := http.FileServer(httpFs.Dir(<PathToServe>))
+http.Handle("/", fileserver)
+```
+
+(A complete, runnable sketch of this pattern appears at the end of this
+overview.)
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request is within the cache duration of when it was
+created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` go to the overlay first.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Cache files in the layer for the given time.Duration; a cache duration of 0
+means "forever", meaning the file will not be re-requested from the base ever.
+
+A read-only base will make the overlay also read-only but still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
+
+### CopyOnWriteFs()
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and Renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+	base := afero.NewOsFs()
+	roBase := afero.NewReadOnlyFs(base)
+	ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+	fh, _ := ufs.Create("/home/test/file2.txt")
+	fh.WriteString("This is a test")
+	fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs),
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* ZIP
+* TAR
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" making "make or do".
+
+The literal meaning of afero is "to make" or "to do", which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
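+
+Following up on the `HttpFs` section above, here is a hedged, self-contained
+sketch of serving an Afero filesystem over HTTP (the in-memory page and the
+`/site` path are invented for illustration, not part of the library):
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/spf13/afero"
+)
+
+func main() {
+	// Build an in-memory filesystem holding a single page to serve.
+	appFs := afero.NewMemMapFs()
+	if err := afero.WriteFile(appFs, "/site/index.html", []byte("<h1>hello</h1>"), 0644); err != nil {
+		panic(err)
+	}
+
+	// Wrap it so it satisfies http.FileSystem, then serve it.
+	httpFs := afero.NewHttpFs(appFs)
+	fileserver := http.FileServer(httpFs.Dir("/site"))
+	http.Handle("/", fileserver)
+	// http.ListenAndServe(":8080", nil) // uncomment to actually listen
+}
+```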
+ +## Release Notes + +* **0.10.0** 2015.12.10 + * Full compatibility with Windows + * Introduction of afero utilities + * Test suite rewritten to work cross platform + * Normalize paths for MemMapFs + * Adding Sync to the file interface + * **Breaking Change** Walk and ReadDir have changed parameter order + * Moving types used by MemMapFs to a subpackage + * General bugfixes and improvements +* **0.9.0** 2015.11.05 + * New Walk function similar to filepath.Walk + * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC + * MemMapFs.Remove now really deletes the file + * InMemoryFile.Readdir and Readdirnames work correctly + * InMemoryFile functions lock it for concurrent access + * Test suite improvements +* **0.8.0** 2014.10.28 + * First public version + * Interfaces feel ready for people to build using + * Interfaces satisfy all known uses + * MemMapFs passes the majority of the OS test suite + * OsFs passes the majority of the OS test suite + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13) +* [jaqx0r](https://github.com/jaqx0r) +* [mbertschler](https://github.com/mbertschler) +* [xor-gate](https://github.com/xor-gate) + +## License + +Afero is released under the Apache 2.0 license. See +[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 000000000..f5b5e127c --- /dev/null +++ b/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,108 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. 
+ Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/afero_test.go b/vendor/github.com/spf13/afero/afero_test.go new file mode 100644 index 000000000..526afa975 --- /dev/null +++ b/vendor/github.com/spf13/afero/afero_test.go @@ -0,0 +1,699 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2009 The Go Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "testing" +) + +var testName = "test.txt" +var Fss = []Fs{&MemMapFs{}, &OsFs{}} + +var testRegistry map[Fs][]string = make(map[Fs][]string) + +func testDir(fs Fs) string { + name, err := TempDir(fs, "", "afero") + if err != nil { + panic(fmt.Sprint("unable to work with test dir", err)) + } + testRegistry[fs] = append(testRegistry[fs], name) + + return name +} + +func tmpFile(fs Fs) File { + x, err := TempFile(fs, "", "afero") + + if err != nil { + panic(fmt.Sprint("unable to work with temp file", err)) + } + + testRegistry[fs] = append(testRegistry[fs], x.Name()) + + return x +} + +//Read with length 0 should not return EOF. +func TestRead0(t *testing.T) { + for _, fs := range Fss { + f := tmpFile(fs) + defer f.Close() + f.WriteString("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.") + + var b []byte + // b := make([]byte, 0) + n, err := f.Read(b) + if n != 0 || err != nil { + t.Errorf("%v: Read(0) = %d, %v, want 0, nil", fs.Name(), n, err) + } + f.Seek(0, 0) + b = make([]byte, 100) + n, err = f.Read(b) + if n <= 0 || err != nil { + t.Errorf("%v: Read(100) = %d, %v, want >0, nil", fs.Name(), n, err) + } + } +} + +func TestOpenFile(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + tmp := testDir(fs) + path := filepath.Join(tmp, testName) + + f, err := fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + t.Error(fs.Name(), "OpenFile (O_CREATE) failed:", err) + continue + } + io.WriteString(f, "initial") + f.Close() + + f, err = fs.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + t.Error(fs.Name(), "OpenFile (O_APPEND) failed:", err) + continue + } + io.WriteString(f, "|append") + f.Close() + + f, err = fs.OpenFile(path, os.O_RDONLY, 0600) + contents, _ := ioutil.ReadAll(f) + expectedContents := "initial|append" + if string(contents) != expectedContents { + t.Errorf("%v: appending, expected '%v', got: '%v'", fs.Name(), expectedContents, string(contents)) + } + f.Close() + + f, err = fs.OpenFile(path, os.O_RDWR|os.O_TRUNC, 0600) + if err != nil { + t.Error(fs.Name(), "OpenFile (O_TRUNC) failed:", err) + continue + } + contents, _ = ioutil.ReadAll(f) + if string(contents) != "" { + t.Errorf("%v: expected truncated file, got: '%v'", fs.Name(), string(contents)) + } + f.Close() + } +} + +func TestCreate(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + tmp := testDir(fs) + path := filepath.Join(tmp, testName) + + f, err := fs.Create(path) + if err != nil { + t.Error(fs.Name(), "Create failed:", err) + f.Close() + continue + } + io.WriteString(f, "initial") + f.Close() + + f, err = fs.Create(path) + if err != nil { + t.Error(fs.Name(), "Create failed:", err) + f.Close() + continue + } + secondContent := "second create" + io.WriteString(f, secondContent) + f.Close() + + f, err = fs.Open(path) + if err != nil { + t.Error(fs.Name(), "Open failed:", err) + f.Close() + continue + } + buf, err := ReadAll(f) + if err != nil { + t.Error(fs.Name(), "ReadAll failed:", err) + f.Close() + continue + } + if string(buf) != secondContent { + t.Error(fs.Name(), "Content should be", "\""+secondContent+"\" but is \""+string(buf)+"\"") + f.Close() + continue + } + f.Close() + } +} + +func TestMemFileRead(t *testing.T) { + f := tmpFile(new(MemMapFs)) + // f := MemFileCreate("testfile") + f.WriteString("abcd") + f.Seek(0, 0) + b := make([]byte, 8) + n, err := f.Read(b) + if n != 4 { + t.Errorf("didn't read all bytes: %v %v %v", n, err, b) + } + if err != nil { + t.Errorf("err is not nil: %v %v %v", n, err, b) + } + n, err = f.Read(b) + if n != 0 { + t.Errorf("read more bytes: %v %v %v", n, err, b) + } + if err != io.EOF { + t.Errorf("error is not EOF: %v %v %v", n, err, b) + } +} + +func TestRename(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + tDir := testDir(fs) + from := filepath.Join(tDir, "/renamefrom") + to := filepath.Join(tDir, "/renameto") + exists := filepath.Join(tDir, "/renameexists") + file, err := fs.Create(from) + if err != nil { + t.Fatalf("%s: open %q 
failed: %v", fs.Name(), to, err) + } + if err = file.Close(); err != nil { + t.Errorf("%s: close %q failed: %v", fs.Name(), to, err) + } + file, err = fs.Create(exists) + if err != nil { + t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err) + } + if err = file.Close(); err != nil { + t.Errorf("%s: close %q failed: %v", fs.Name(), to, err) + } + err = fs.Rename(from, to) + if err != nil { + t.Fatalf("%s: rename %q, %q failed: %v", fs.Name(), to, from, err) + } + file, err = fs.Create(from) + if err != nil { + t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err) + } + if err = file.Close(); err != nil { + t.Errorf("%s: close %q failed: %v", fs.Name(), to, err) + } + err = fs.Rename(from, exists) + if err != nil { + t.Errorf("%s: rename %q, %q failed: %v", fs.Name(), exists, from, err) + } + names, err := readDirNames(fs, tDir) + if err != nil { + t.Errorf("%s: readDirNames error: %v", fs.Name(), err) + } + found := false + for _, e := range names { + if e == "renamefrom" { + t.Error("File is still called renamefrom") + } + if e == "renameto" { + found = true + } + } + if !found { + t.Error("File was not renamed to renameto") + } + + _, err = fs.Stat(to) + if err != nil { + t.Errorf("%s: stat %q failed: %v", fs.Name(), to, err) + } + } +} + +func TestRemove(t *testing.T) { + for _, fs := range Fss { + + x, err := TempFile(fs, "", "afero") + if err != nil { + t.Error(fmt.Sprint("unable to work with temp file", err)) + } + + path := x.Name() + x.Close() + + tDir := filepath.Dir(path) + + err = fs.Remove(path) + if err != nil { + t.Errorf("%v: Remove() failed: %v", fs.Name(), err) + continue + } + + _, err = fs.Stat(path) + if !os.IsNotExist(err) { + t.Errorf("%v: Remove() didn't remove file", fs.Name()) + continue + } + + // Deleting non-existent file should raise error + err = fs.Remove(path) + if !os.IsNotExist(err) { + t.Errorf("%v: Remove() didn't raise error for non-existent file", fs.Name()) + } + + f, err := fs.Open(tDir) + if err != nil { + t.Error("TestDir should still exist:", err) + } + + names, err := f.Readdirnames(-1) + if err != nil { + t.Error("Readdirnames failed:", err) + } + + for _, e := range names { + if e == testName { + t.Error("File was not removed from parent directory") + } + } + } +} + +func TestTruncate(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + f := tmpFile(fs) + defer f.Close() + + checkSize(t, f, 0) + f.Write([]byte("hello, world\n")) + checkSize(t, f, 13) + f.Truncate(10) + checkSize(t, f, 10) + f.Truncate(1024) + checkSize(t, f, 1024) + f.Truncate(0) + checkSize(t, f, 0) + _, err := f.Write([]byte("surprise!")) + if err == nil { + checkSize(t, f, 13+9) // wrote at offset past where hello, world was. + } + } +} + +func TestSeek(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + f := tmpFile(fs) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + type test struct { + in int64 + whence int + out int64 + } + var tests = []test{ + {0, 1, int64(len(data))}, + {0, 0, 0}, + {5, 0, 5}, + {0, 2, int64(len(data))}, + {0, 0, 0}, + {-1, 2, int64(len(data)) - 1}, + {1 << 33, 0, 1 << 33}, + {1 << 33, 2, 1<<33 + int64(len(data))}, + } + for i, tt := range tests { + off, err := f.Seek(tt.in, tt.whence) + if off != tt.out || err != nil { + if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL && tt.out > 1<<32 { + // Reiserfs rejects the big seeks. 
+ // http://code.google.com/p/go/issues/detail?id=91 + break + } + t.Errorf("#%d: Seek(%v, %v) = %v, %v want %v, nil", i, tt.in, tt.whence, off, err, tt.out) + } + } + } +} + +func TestReadAt(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + f := tmpFile(fs) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + b := make([]byte, 5) + n, err := f.ReadAt(b, 7) + if err != nil || n != len(b) { + t.Fatalf("ReadAt 7: %d, %v", n, err) + } + if string(b) != "world" { + t.Fatalf("ReadAt 7: have %q want %q", string(b), "world") + } + } +} + +func TestWriteAt(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + f := tmpFile(fs) + defer f.Close() + + const data = "hello, world\n" + io.WriteString(f, data) + + n, err := f.WriteAt([]byte("WORLD"), 7) + if err != nil || n != 5 { + t.Fatalf("WriteAt 7: %d, %v", n, err) + } + + f2, err := fs.Open(f.Name()) + if err != nil { + t.Fatalf("%v: ReadFile %s: %v", fs.Name(), f.Name(), err) + } + defer f2.Close() + buf := new(bytes.Buffer) + buf.ReadFrom(f2) + b := buf.Bytes() + if string(b) != "hello, WORLD\n" { + t.Fatalf("after write: have %q want %q", string(b), "hello, WORLD\n") + } + + } +} + +func setupTestDir(t *testing.T, fs Fs) string { + path := testDir(fs) + return setupTestFiles(t, fs, path) +} + +func setupTestDirRoot(t *testing.T, fs Fs) string { + path := testDir(fs) + setupTestFiles(t, fs, path) + return path +} + +func setupTestDirReusePath(t *testing.T, fs Fs, path string) string { + testRegistry[fs] = append(testRegistry[fs], path) + return setupTestFiles(t, fs, path) +} + +func setupTestFiles(t *testing.T, fs Fs, path string) string { + testSubDir := filepath.Join(path, "more", "subdirectories", "for", "testing", "we") + err := fs.MkdirAll(testSubDir, 0700) + if err != nil && !os.IsExist(err) { + t.Fatal(err) + } + + f, err := fs.Create(filepath.Join(testSubDir, "testfile1")) + if err != nil { + t.Fatal(err) + } + f.WriteString("Testfile 1 content") + f.Close() + + f, err = fs.Create(filepath.Join(testSubDir, "testfile2")) + if err != nil { + t.Fatal(err) + } + f.WriteString("Testfile 2 content") + f.Close() + + f, err = fs.Create(filepath.Join(testSubDir, "testfile3")) + if err != nil { + t.Fatal(err) + } + f.WriteString("Testfile 3 content") + f.Close() + + f, err = fs.Create(filepath.Join(testSubDir, "testfile4")) + if err != nil { + t.Fatal(err) + } + f.WriteString("Testfile 4 content") + f.Close() + return testSubDir +} + +func TestReaddirnames(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + testSubDir := setupTestDir(t, fs) + tDir := filepath.Dir(testSubDir) + + root, err := fs.Open(tDir) + if err != nil { + t.Fatal(fs.Name(), tDir, err) + } + defer root.Close() + + namesRoot, err := root.Readdirnames(-1) + if err != nil { + t.Fatal(fs.Name(), namesRoot, err) + } + + sub, err := fs.Open(testSubDir) + if err != nil { + t.Fatal(err) + } + defer sub.Close() + + namesSub, err := sub.Readdirnames(-1) + if err != nil { + t.Fatal(fs.Name(), namesSub, err) + } + + findNames(fs, t, tDir, testSubDir, namesRoot, namesSub) + } +} + +func TestReaddirSimple(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + testSubDir := setupTestDir(t, fs) + tDir := filepath.Dir(testSubDir) + + root, err := fs.Open(tDir) + if err != nil { + t.Fatal(err) + } + defer root.Close() + + rootInfo, err := root.Readdir(1) + if err != nil { + t.Log(myFileInfo(rootInfo)) + t.Error(err) + } + + rootInfo, err = root.Readdir(5) + if err != io.EOF { + 
t.Log(myFileInfo(rootInfo)) + t.Error(err) + } + + sub, err := fs.Open(testSubDir) + if err != nil { + t.Fatal(err) + } + defer sub.Close() + + subInfo, err := sub.Readdir(5) + if err != nil { + t.Log(myFileInfo(subInfo)) + t.Error(err) + } + } +} + +func TestReaddir(t *testing.T) { + defer removeAllTestFiles(t) + for num := 0; num < 6; num++ { + outputs := make([]string, len(Fss)) + infos := make([]string, len(Fss)) + for i, fs := range Fss { + testSubDir := setupTestDir(t, fs) + //tDir := filepath.Dir(testSubDir) + root, err := fs.Open(testSubDir) + if err != nil { + t.Fatal(err) + } + defer root.Close() + + for j := 0; j < 6; j++ { + info, err := root.Readdir(num) + outputs[i] += fmt.Sprintf("%v Error: %v\n", myFileInfo(info), err) + infos[i] += fmt.Sprintln(len(info), err) + } + } + + fail := false + for i, o := range infos { + if i == 0 { + continue + } + if o != infos[i-1] { + fail = true + break + } + } + if fail { + t.Log("Readdir outputs not equal for Readdir(", num, ")") + for i, o := range outputs { + t.Log(Fss[i].Name()) + t.Log(o) + } + t.Fail() + } + } +} + +type myFileInfo []os.FileInfo + +func (m myFileInfo) String() string { + out := "Fileinfos:\n" + for _, e := range m { + out += " " + e.Name() + "\n" + } + return out +} + +func TestReaddirAll(t *testing.T) { + defer removeAllTestFiles(t) + for _, fs := range Fss { + testSubDir := setupTestDir(t, fs) + tDir := filepath.Dir(testSubDir) + + root, err := fs.Open(tDir) + if err != nil { + t.Fatal(err) + } + defer root.Close() + + rootInfo, err := root.Readdir(-1) + if err != nil { + t.Fatal(err) + } + var namesRoot = []string{} + for _, e := range rootInfo { + namesRoot = append(namesRoot, e.Name()) + } + + sub, err := fs.Open(testSubDir) + if err != nil { + t.Fatal(err) + } + defer sub.Close() + + subInfo, err := sub.Readdir(-1) + if err != nil { + t.Fatal(err) + } + var namesSub = []string{} + for _, e := range subInfo { + namesSub = append(namesSub, e.Name()) + } + + findNames(fs, t, tDir, testSubDir, namesRoot, namesSub) + } +} + +func findNames(fs Fs, t *testing.T, tDir, testSubDir string, root, sub []string) { + var foundRoot bool + for _, e := range root { + f, err := fs.Open(filepath.Join(tDir, e)) + if err != nil { + t.Error("Open", filepath.Join(tDir, e), ":", err) + } + defer f.Close() + + if equal(e, "we") { + foundRoot = true + } + } + if !foundRoot { + t.Logf("Names root: %v", root) + t.Logf("Names sub: %v", sub) + t.Error("Didn't find subdirectory we") + } + + var found1, found2 bool + for _, e := range sub { + f, err := fs.Open(filepath.Join(testSubDir, e)) + if err != nil { + t.Error("Open", filepath.Join(testSubDir, e), ":", err) + } + defer f.Close() + + if equal(e, "testfile1") { + found1 = true + } + if equal(e, "testfile2") { + found2 = true + } + } + + if !found1 { + t.Logf("Names root: %v", root) + t.Logf("Names sub: %v", sub) + t.Error("Didn't find testfile1") + } + if !found2 { + t.Logf("Names root: %v", root) + t.Logf("Names sub: %v", sub) + t.Error("Didn't find testfile2") + } +} + +func removeAllTestFiles(t *testing.T) { + for fs, list := range testRegistry { + for _, path := range list { + if err := fs.RemoveAll(path); err != nil { + t.Error(fs.Name(), err) + } + } + } + testRegistry = make(map[Fs][]string) +} + +func equal(name1, name2 string) (r bool) { + switch runtime.GOOS { + case "windows": + r = strings.ToLower(name1) == strings.ToLower(name2) + default: + r = name1 == name2 + } + return +} + +func checkSize(t *testing.T, f File, size int64) { + dir, err := f.Stat() + if err != nil { + 
t.Fatalf("Stat %q (looking for size %d): %s", f.Name(), size, err) + } + if dir.Size() != size { + t.Errorf("Stat %q: size %d want %d", f.Name(), dir.Size(), size) + } +} diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml new file mode 100644 index 000000000..a633ad500 --- /dev/null +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -0,0 +1,15 @@ +version: '{build}' +clone_folder: C:\gopath\src\github.com\spf13\afero +environment: + GOPATH: C:\gopath +build_script: +- cmd: >- + go version + + go env + + go get -v github.com/spf13/afero/... + + go build github.com/spf13/afero +test_script: +- cmd: go test -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 000000000..616ff8ff7 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,180 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var _ Lstater = (*BasePathFs)(nil) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. +type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. 
+ if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/basepath_test.go b/vendor/github.com/spf13/afero/basepath_test.go 
new file mode 100644 index 000000000..77aa7dfce --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath_test.go @@ -0,0 +1,190 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "testing" +) + +func TestBasePath(t *testing.T) { + baseFs := &MemMapFs{} + baseFs.MkdirAll("/base/path/tmp", 0777) + bp := NewBasePathFs(baseFs, "/base/path") + + if _, err := bp.Create("/tmp/foo"); err != nil { + t.Errorf("Failed to set real path") + } + + if fh, err := bp.Create("../tmp/bar"); err == nil { + t.Errorf("succeeded in creating %s ...", fh.Name()) + } +} + +func TestBasePathRoot(t *testing.T) { + baseFs := &MemMapFs{} + baseFs.MkdirAll("/base/path/foo/baz", 0777) + baseFs.MkdirAll("/base/path/boo/", 0777) + bp := NewBasePathFs(baseFs, "/base/path") + + rd, err := ReadDir(bp, string(os.PathSeparator)) + + if len(rd) != 2 { + t.Errorf("base path doesn't respect root") + } + + if err != nil { + t.Error(err) + } +} + +func TestRealPath(t *testing.T) { + fs := NewOsFs() + baseDir, err := TempDir(fs, "", "base") + if err != nil { + t.Fatal("error creating tempDir", err) + } + defer fs.RemoveAll(baseDir) + anotherDir, err := TempDir(fs, "", "another") + if err != nil { + t.Fatal("error creating tempDir", err) + } + defer fs.RemoveAll(anotherDir) + + bp := NewBasePathFs(fs, baseDir).(*BasePathFs) + + subDir := filepath.Join(baseDir, "s1") + + realPath, err := bp.RealPath("/s1") + + if err != nil { + t.Errorf("Got error %s", err) + } + + if realPath != subDir { + t.Errorf("Expected \n%s got \n%s", subDir, realPath) + } + + if runtime.GOOS == "windows" { + _, err = bp.RealPath(anotherDir) + + if err != os.ErrNotExist { + t.Errorf("Expected os.ErrNotExist") + } + + } else { + // on *nix we have no way of just looking at the path and tell that anotherDir + // is not inside the base file system. + // The user will receive an os.ErrNotExist later. 
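+		// For example (hypothetical values): with baseDir "/tmp/base123" and
+		// anotherDir "/tmp/another456", the call below returns the joined path
+		// "/tmp/base123/tmp/another456" rather than an error.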
+		surrealPath, err := bp.RealPath(anotherDir)
+
+		if err != nil {
+			t.Errorf("Got error %s", err)
+		}
+
+		expected := filepath.Join(baseDir, anotherDir)
+
+		if surrealPath != expected {
+			t.Errorf("Expected \n%s got \n%s", expected, surrealPath)
+		}
+	}
+
+}
+
+func TestNestedBasePaths(t *testing.T) {
+	type dirSpec struct {
+		Dir1, Dir2, Dir3 string
+	}
+	dirSpecs := []dirSpec{
+		dirSpec{Dir1: "/", Dir2: "/", Dir3: "/"},
+		dirSpec{Dir1: "/", Dir2: "/path2", Dir3: "/"},
+		dirSpec{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"},
+		dirSpec{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"},
+	}
+
+	for _, ds := range dirSpecs {
+		memFs := NewMemMapFs()
+		level1Fs := NewBasePathFs(memFs, ds.Dir1)
+		level2Fs := NewBasePathFs(level1Fs, ds.Dir2)
+		level3Fs := NewBasePathFs(level2Fs, ds.Dir3)
+
+		type spec struct {
+			BaseFs   Fs
+			FileName string
+		}
+		specs := []spec{
+			spec{BaseFs: level3Fs, FileName: "f.txt"},
+			spec{BaseFs: level2Fs, FileName: "f.txt"},
+			spec{BaseFs: level1Fs, FileName: "f.txt"},
+		}
+
+		for _, s := range specs {
+			if err := s.BaseFs.MkdirAll(s.FileName, 0755); err != nil {
+				t.Errorf("Got error %s", err.Error())
+			}
+			if _, err := s.BaseFs.Stat(s.FileName); err != nil {
+				t.Errorf("Got error %s", err.Error())
+			}
+
+			if s.BaseFs == level3Fs {
+				pathToExist := filepath.Join(ds.Dir3, s.FileName)
+				if _, err := level2Fs.Stat(pathToExist); err != nil {
+					t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
+				}
+			} else if s.BaseFs == level2Fs {
+				pathToExist := filepath.Join(ds.Dir2, ds.Dir3, s.FileName)
+				if _, err := level1Fs.Stat(pathToExist); err != nil {
+					t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
+				}
+			}
+		}
+	}
+}
+
+func TestBasePathOpenFile(t *testing.T) {
+	baseFs := &MemMapFs{}
+	baseFs.MkdirAll("/base/path/tmp", 0777)
+	bp := NewBasePathFs(baseFs, "/base/path")
+	f, err := bp.OpenFile("/tmp/file.txt", os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatalf("failed to open file: %v", err)
+	}
+	if filepath.Dir(f.Name()) != filepath.Clean("/tmp") {
+		t.Fatalf("realpath leaked: %s", f.Name())
+	}
+}
+
+func TestBasePathCreate(t *testing.T) {
+	baseFs := &MemMapFs{}
+	baseFs.MkdirAll("/base/path/tmp", 0777)
+	bp := NewBasePathFs(baseFs, "/base/path")
+	f, err := bp.Create("/tmp/file.txt")
+	if err != nil {
+		t.Fatalf("failed to create file: %v", err)
+	}
+	if filepath.Dir(f.Name()) != filepath.Clean("/tmp") {
+		t.Fatalf("realpath leaked: %s", f.Name())
+	}
+}
+
+func TestBasePathTempFile(t *testing.T) {
+	baseFs := &MemMapFs{}
+	baseFs.MkdirAll("/base/path/tmp", 0777)
+	bp := NewBasePathFs(baseFs, "/base/path")
+
+	tDir, err := TempDir(bp, "/tmp", "")
+	if err != nil {
+		t.Fatalf("Failed to TempDir: %v", err)
+	}
+	if filepath.Dir(tDir) != filepath.Clean("/tmp") {
+		t.Fatalf("Tempdir realpath leaked: %s", tDir)
+	}
+	tempFile, err := TempFile(bp, tDir, "")
+	if err != nil {
+		t.Fatalf("Failed to TempFile: %v", err)
+	}
+	defer tempFile.Close()
+	if expected, actual := tDir, filepath.Dir(tempFile.Name()); expected != actual {
+		t.Fatalf("TempFile realpath leaked: expected %s, got %s", expected, actual)
+	}
+}
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 000000000..29a26c67d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,290 @@
+package afero
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e.
once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. +type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err 
= u.base.Rename(oldname, newname)
+	case cacheStale, cacheMiss:
+		if err := u.copyToLayer(oldname); err != nil {
+			return err
+		}
+		err = u.base.Rename(oldname, newname)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit, cacheStale, cacheMiss:
+		err = u.base.Remove(name)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit, cacheStale, cacheMiss:
+		err = u.base.RemoveAll(name)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+	switch st {
+	case cacheLocal, cacheHit:
+	default:
+		if err := u.copyToLayer(name); err != nil {
+			return nil, err
+		}
+	}
+	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		bfi, err := u.base.OpenFile(name, flag, perm)
+		if err != nil {
+			return nil, err
+		}
+		lfi, err := u.layer.OpenFile(name, flag, perm)
+		if err != nil {
+			bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+			return nil, err
+		}
+		return &UnionFile{Base: bfi, Layer: lfi}, nil
+	}
+	return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+	st, fi, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+
+	switch st {
+	case cacheLocal:
+		return u.layer.Open(name)
+
+	case cacheMiss:
+		bfi, err := u.base.Stat(name)
+		if err != nil {
+			return nil, err
+		}
+		if bfi.IsDir() {
+			return u.base.Open(name)
+		}
+		if err := u.copyToLayer(name); err != nil {
+			return nil, err
+		}
+		return u.layer.Open(name)
+
+	case cacheStale:
+		if !fi.IsDir() {
+			if err := u.copyToLayer(name); err != nil {
+				return nil, err
+			}
+			return u.layer.Open(name)
+		}
+	case cacheHit:
+		if !fi.IsDir() {
+			return u.layer.Open(name)
+		}
+	}
+	// the dirs from cacheHit, cacheStale fall down here:
+	bfile, _ := u.base.Open(name)
+	lfile, err := u.layer.Open(name)
+	if err != nil && bfile == nil {
+		return nil, err
+	}
+	return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+	err := u.base.Mkdir(name, perm)
+	if err != nil {
+		return err
+	}
+	return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+	return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+	err := u.base.MkdirAll(name, perm)
+	if err != nil {
+		return err
+	}
+	return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+	bfh, err := u.base.Create(name)
+	if err != nil {
+		return nil, err
+	}
+	lfh, err := u.layer.Create(name)
+	if err != nil {
+		// oops, see comment about O_TRUNC above, should we remove?
then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/composite_test.go b/vendor/github.com/spf13/afero/composite_test.go new file mode 100644 index 000000000..2a20440e3 --- /dev/null +++ b/vendor/github.com/spf13/afero/composite_test.go @@ -0,0 +1,403 @@ +package afero + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + "time" +) + +var tempDirs []string + +func NewTempOsBaseFs(t *testing.T) Fs { + name, err := TempDir(NewOsFs(), "", "") + if err != nil { + t.Error("error creating tempDir", err) + } + + tempDirs = append(tempDirs, name) + + return NewBasePathFs(NewOsFs(), name) +} + +func CleanupTempDirs(t *testing.T) { + osfs := NewOsFs() + type ev struct { + path string + e error + } + + errs := []ev{} + + for _, x := range tempDirs { + err := osfs.RemoveAll(x) + if err != nil { + errs = append(errs, ev{path: x, e: err}) + } + } + + for _, e := range errs { + fmt.Println("error removing tempDir", e.path, e.e) + } + + if len(errs) > 0 { + t.Error("error cleaning up tempDirs") + } + tempDirs = []string{} +} + +func TestUnionCreateExisting(t *testing.T) { + base := &MemMapFs{} + roBase := &ReadOnlyFs{source: base} + ufs := NewCopyOnWriteFs(roBase, &MemMapFs{}) + + base.MkdirAll("/home/test", 0777) + fh, _ := base.Create("/home/test/file.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, err := ufs.OpenFile("/home/test/file.txt", os.O_RDWR, 0666) + if err != nil { + t.Errorf("Failed to open file r/w: %s", err) + } + + _, err = fh.Write([]byte("####")) + if err != nil { + t.Errorf("Failed to write file: %s", err) + } + fh.Seek(0, 0) + data, err := ioutil.ReadAll(fh) + if err != nil { + t.Errorf("Failed to read file: %s", err) + } + if string(data) != "#### is a test" { + t.Errorf("Got wrong data") + } + fh.Close() + + fh, _ = base.Open("/home/test/file.txt") + data, err = ioutil.ReadAll(fh) + if string(data) != "This is a test" { + t.Errorf("Got wrong data in base file") + } + fh.Close() + + fh, err = ufs.Create("/home/test/file.txt") + switch err { + case nil: + if fi, _ := fh.Stat(); fi.Size() != 0 { + t.Errorf("Create did not truncate file") + } + fh.Close() + default: + t.Errorf("Create failed on existing file") + } + +} + +func TestUnionMergeReaddir(t *testing.T) { + base := &MemMapFs{} + roBase := &ReadOnlyFs{source: base} + + ufs := &CopyOnWriteFs{base: roBase, layer: &MemMapFs{}} + + base.MkdirAll("/home/test", 0777) + fh, _ := base.Create("/home/test/file.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, _ = ufs.Create("/home/test/file2.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, _ = ufs.Open("/home/test") + files, err := fh.Readdirnames(-1) + if err != nil { + t.Errorf("Readdirnames failed") + } + if len(files) != 2 { + t.Errorf("Got wrong number of files: %v", files) + } +} + +func TestExistingDirectoryCollisionReaddir(t *testing.T) { + base := &MemMapFs{} + roBase := &ReadOnlyFs{source: base} + overlay := &MemMapFs{} + + ufs := &CopyOnWriteFs{base: roBase, layer: overlay} + + base.MkdirAll("/home/test", 0777) + fh, _ := base.Create("/home/test/file.txt") + fh.WriteString("This is a test") + fh.Close() + + overlay.MkdirAll("home/test", 0777) + fh, _ = overlay.Create("/home/test/file2.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, _ = ufs.Create("/home/test/file3.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, _ = ufs.Open("/home/test") + files, err := 
fh.Readdirnames(-1) + if err != nil { + t.Errorf("Readdirnames failed") + } + if len(files) != 3 { + t.Errorf("Got wrong number of files in union: %v", files) + } + + fh, _ = overlay.Open("/home/test") + files, err = fh.Readdirnames(-1) + if err != nil { + t.Errorf("Readdirnames failed") + } + if len(files) != 2 { + t.Errorf("Got wrong number of files in overlay: %v", files) + } +} + +func TestNestedDirBaseReaddir(t *testing.T) { + base := &MemMapFs{} + roBase := &ReadOnlyFs{source: base} + overlay := &MemMapFs{} + + ufs := &CopyOnWriteFs{base: roBase, layer: overlay} + + base.MkdirAll("/home/test/foo/bar", 0777) + fh, _ := base.Create("/home/test/file.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, _ = base.Create("/home/test/foo/file2.txt") + fh.WriteString("This is a test") + fh.Close() + fh, _ = base.Create("/home/test/foo/bar/file3.txt") + fh.WriteString("This is a test") + fh.Close() + + overlay.MkdirAll("/", 0777) + + // Opening something only in the base + fh, _ = ufs.Open("/home/test/foo") + list, err := fh.Readdir(-1) + if err != nil { + t.Errorf("Readdir failed %s", err) + } + if len(list) != 2 { + for _, x := range list { + fmt.Println(x.Name()) + } + t.Errorf("Got wrong number of files in union: %v", len(list)) + } +} + +func TestNestedDirOverlayReaddir(t *testing.T) { + base := &MemMapFs{} + roBase := &ReadOnlyFs{source: base} + overlay := &MemMapFs{} + + ufs := &CopyOnWriteFs{base: roBase, layer: overlay} + + base.MkdirAll("/", 0777) + overlay.MkdirAll("/home/test/foo/bar", 0777) + fh, _ := overlay.Create("/home/test/file.txt") + fh.WriteString("This is a test") + fh.Close() + fh, _ = overlay.Create("/home/test/foo/file2.txt") + fh.WriteString("This is a test") + fh.Close() + fh, _ = overlay.Create("/home/test/foo/bar/file3.txt") + fh.WriteString("This is a test") + fh.Close() + + // Opening nested dir only in the overlay + fh, _ = ufs.Open("/home/test/foo") + list, err := fh.Readdir(-1) + if err != nil { + t.Errorf("Readdir failed %s", err) + } + if len(list) != 2 { + for _, x := range list { + fmt.Println(x.Name()) + } + t.Errorf("Got wrong number of files in union: %v", len(list)) + } +} + +func TestNestedDirOverlayOsFsReaddir(t *testing.T) { + defer CleanupTempDirs(t) + base := NewTempOsBaseFs(t) + roBase := &ReadOnlyFs{source: base} + overlay := NewTempOsBaseFs(t) + + ufs := &CopyOnWriteFs{base: roBase, layer: overlay} + + base.MkdirAll("/", 0777) + overlay.MkdirAll("/home/test/foo/bar", 0777) + fh, _ := overlay.Create("/home/test/file.txt") + fh.WriteString("This is a test") + fh.Close() + fh, _ = overlay.Create("/home/test/foo/file2.txt") + fh.WriteString("This is a test") + fh.Close() + fh, _ = overlay.Create("/home/test/foo/bar/file3.txt") + fh.WriteString("This is a test") + fh.Close() + + // Opening nested dir only in the overlay + fh, _ = ufs.Open("/home/test/foo") + list, err := fh.Readdir(-1) + fh.Close() + if err != nil { + t.Errorf("Readdir failed %s", err) + } + if len(list) != 2 { + for _, x := range list { + fmt.Println(x.Name()) + } + t.Errorf("Got wrong number of files in union: %v", len(list)) + } +} + +func TestCopyOnWriteFsWithOsFs(t *testing.T) { + defer CleanupTempDirs(t) + base := NewTempOsBaseFs(t) + roBase := &ReadOnlyFs{source: base} + overlay := NewTempOsBaseFs(t) + + ufs := &CopyOnWriteFs{base: roBase, layer: overlay} + + base.MkdirAll("/home/test", 0777) + fh, _ := base.Create("/home/test/file.txt") + fh.WriteString("This is a test") + fh.Close() + + overlay.MkdirAll("home/test", 0777) + fh, _ = 
overlay.Create("/home/test/file2.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, _ = ufs.Create("/home/test/file3.txt") + fh.WriteString("This is a test") + fh.Close() + + fh, _ = ufs.Open("/home/test") + files, err := fh.Readdirnames(-1) + fh.Close() + if err != nil { + t.Errorf("Readdirnames failed") + } + if len(files) != 3 { + t.Errorf("Got wrong number of files in union: %v", files) + } + + fh, _ = overlay.Open("/home/test") + files, err = fh.Readdirnames(-1) + fh.Close() + if err != nil { + t.Errorf("Readdirnames failed") + } + if len(files) != 2 { + t.Errorf("Got wrong number of files in overlay: %v", files) + } +} + +func TestUnionCacheWrite(t *testing.T) { + base := &MemMapFs{} + layer := &MemMapFs{} + + ufs := NewCacheOnReadFs(base, layer, 0) + + base.Mkdir("/data", 0777) + + fh, err := ufs.Create("/data/file.txt") + if err != nil { + t.Errorf("Failed to create file") + } + _, err = fh.Write([]byte("This is a test")) + if err != nil { + t.Errorf("Failed to write file") + } + + fh.Seek(0, os.SEEK_SET) + buf := make([]byte, 4) + _, err = fh.Read(buf) + fh.Write([]byte(" IS A")) + fh.Close() + + baseData, _ := ReadFile(base, "/data/file.txt") + layerData, _ := ReadFile(layer, "/data/file.txt") + if string(baseData) != string(layerData) { + t.Errorf("Different data: %s <=> %s", baseData, layerData) + } +} + +func TestUnionCacheExpire(t *testing.T) { + base := &MemMapFs{} + layer := &MemMapFs{} + ufs := &CacheOnReadFs{base: base, layer: layer, cacheTime: 1 * time.Second} + + base.Mkdir("/data", 0777) + + fh, err := ufs.Create("/data/file.txt") + if err != nil { + t.Errorf("Failed to create file") + } + _, err = fh.Write([]byte("This is a test")) + if err != nil { + t.Errorf("Failed to write file") + } + fh.Close() + + fh, _ = base.Create("/data/file.txt") + // sleep some time, so we really get a different time.Now() on write... + time.Sleep(2 * time.Second) + fh.WriteString("Another test") + fh.Close() + + data, _ := ReadFile(ufs, "/data/file.txt") + if string(data) != "Another test" { + t.Errorf("cache time failed: <%s>", data) + } +} + +func TestCacheOnReadFsNotInLayer(t *testing.T) { + base := NewMemMapFs() + layer := NewMemMapFs() + fs := NewCacheOnReadFs(base, layer, 0) + + fh, err := base.Create("/file.txt") + if err != nil { + t.Fatal("unable to create file: ", err) + } + + txt := []byte("This is a test") + fh.Write(txt) + fh.Close() + + fh, err = fs.Open("/file.txt") + if err != nil { + t.Fatal("could not open file: ", err) + } + + b, err := ReadAll(fh) + fh.Close() + + if err != nil { + t.Fatal("could not read file: ", err) + } else if !bytes.Equal(txt, b) { + t.Fatalf("wanted file text %q, got %q", txt, b) + } + + fh, err = layer.Open("/file.txt") + if err != nil { + t.Fatal("could not open file from layer: ", err) + } + fh.Close() +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 000000000..5728243d9 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 000000000..968fc2783 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 000000000..9aef39794 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,292 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
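+//
+// A minimal usage sketch (the file name and contents here are illustrative):
+//
+//	base := NewReadOnlyFs(NewOsFs())     // read-only base layer
+//	layer := NewMemMapFs()               // all writes land in the overlay
+//	ufs := NewCopyOnWriteFs(base, layer)
+//	_ = WriteFile(ufs, "note.txt", []byte("hi"), 0644) // created in the overlay only
+//	data, _ := ReadFile(ufs, "note.txt")               // served from the overlay
+//	_ = data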
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. 
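+//
+// For example (a sketch of these semantics; the file names are illustrative):
+//
+//	ufs.Remove("only-in-base.txt") // refused with a permission error
+//	ufs.Remove("in-both.txt")      // removes the overlay copy; the base copy survives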
+func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return syscall.EEXIST + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return syscall.EEXIST + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs_test.go b/vendor/github.com/spf13/afero/copyOnWriteFs_test.go new file mode 100644 index 000000000..0f199dce0 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs_test.go @@ -0,0 +1,40 @@ +package afero + +import "testing" + +func TestCopyOnWrite(t *testing.T) { + var fs Fs + var err error + base := NewOsFs() + roBase := NewReadOnlyFs(base) + ufs := NewCopyOnWriteFs(roBase, NewMemMapFs()) + + fs = ufs + err = fs.MkdirAll("nonexistent/directory/", 0744) + if err != nil { + t.Error(err) + return + } + _, err = fs.Create("nonexistent/directory/newfile") + if err != nil { + t.Error(err) + return + } + +} + +func TestCopyOnWriteFileInMemMapBase(t *testing.T) { + base := &MemMapFs{} + layer := &MemMapFs{} + + if err := WriteFile(base, "base.txt", []byte("base"), 0755); err != nil { + t.Fatalf("Failed to write file: %s", err) + } + + ufs := NewCopyOnWriteFs(base, layer) + + _, err := ufs.Stat("base.txt") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 000000000..c42193688 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
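+		// an empty basePath is treated as the current directory, mirroring net/http.Dir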
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 000000000..5c3a3d8ff --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. 
Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, prefix string) (f File, err error) { + return TempFile(a.Fs, dir, prefix) +} + +func TempFile(fs Fs, dir, prefix string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/ioutil_test.go b/vendor/github.com/spf13/afero/ioutil_test.go new file mode 100644 index 000000000..e7c9f0698 --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil_test.go @@ -0,0 +1,112 @@ +// ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
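+
+// A minimal TempFile/TempDir sketch against an in-memory filesystem (the
+// prefixes and the written data are illustrative):
+//
+//	fs := NewMemMapFs()
+//	dir, _ := TempDir(fs, "", "work")    // a fresh directory under os.TempDir()
+//	f, _ := TempFile(fs, dir, "scratch") // unique name beginning with "scratch"
+//	f.WriteString("hello")
+//	f.Close()
+//	fs.RemoveAll(dir) // cleanup is the caller's responsibility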
+
+package afero
+
+import "testing"
+
+func checkSizePath(t *testing.T, path string, size int64) {
+	dir, err := testFS.Stat(path)
+	if err != nil {
+		t.Fatalf("Stat %q (looking for size %d): %s", path, size, err)
+	}
+	if dir.Size() != size {
+		t.Errorf("Stat %q: size %d want %d", path, dir.Size(), size)
+	}
+}
+
+func TestReadFile(t *testing.T) {
+	testFS = &MemMapFs{}
+	fsutil := &Afero{Fs: testFS}
+
+	testFS.Create("this_exists.go")
+	filename := "rumpelstilzchen"
+	contents, err := fsutil.ReadFile(filename)
+	if err == nil {
+		t.Fatalf("ReadFile %s: error expected, none found", filename)
+	}
+
+	filename = "this_exists.go"
+	contents, err = fsutil.ReadFile(filename)
+	if err != nil {
+		t.Fatalf("ReadFile %s: %v", filename, err)
+	}
+
+	checkSizePath(t, filename, int64(len(contents)))
+}
+
+func TestWriteFile(t *testing.T) {
+	testFS = &MemMapFs{}
+	fsutil := &Afero{Fs: testFS}
+	f, err := fsutil.TempFile("", "ioutil-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	filename := f.Name()
+	data := "Programming today is a race between software engineers striving to " +
+		"build bigger and better idiot-proof programs, and the Universe trying " +
+		"to produce bigger and better idiots. So far, the Universe is winning."
+
+	if err := fsutil.WriteFile(filename, []byte(data), 0644); err != nil {
+		t.Fatalf("WriteFile %s: %v", filename, err)
+	}
+
+	contents, err := fsutil.ReadFile(filename)
+	if err != nil {
+		t.Fatalf("ReadFile %s: %v", filename, err)
+	}
+
+	if string(contents) != data {
+		t.Fatalf("contents = %q\nexpected = %q", string(contents), data)
+	}
+
+	// cleanup
+	f.Close()
+	testFS.Remove(filename) // ignore error
+}
+
+func TestReadDir(t *testing.T) {
+	testFS = &MemMapFs{}
+	testFS.Mkdir("/i-am-a-dir", 0777)
+	testFS.Create("/this_exists.go")
+	dirname := "rumpelstilzchen"
+	_, err := ReadDir(testFS, dirname)
+	if err == nil {
+		t.Fatalf("ReadDir %s: error expected, none found", dirname)
+	}
+
+	dirname = ".."
+	list, err := ReadDir(testFS, dirname)
+	if err != nil {
+		t.Fatalf("ReadDir %s: %v", dirname, err)
+	}
+
+	foundFile := false
+	foundSubDir := false
+	for _, dir := range list {
+		switch {
+		case !dir.IsDir() && dir.Name() == "this_exists.go":
+			foundFile = true
+		case dir.IsDir() && dir.Name() == "i-am-a-dir":
+			foundSubDir = true
+		}
+	}
+	if !foundFile {
+		t.Fatalf("ReadDir %s: this_exists.go file not found", dirname)
+	}
+	if !foundSubDir {
+		t.Fatalf("ReadDir %s: i-am-a-dir directory not found", dirname)
+	}
+}
diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 000000000..89c1bfc0a
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Else it will call Stat.
+// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
+type Lstater interface {
+	LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
diff --git a/vendor/github.com/spf13/afero/lstater_test.go b/vendor/github.com/spf13/afero/lstater_test.go
new file mode 100644
index 000000000..477924f64
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater_test.go
@@ -0,0 +1,102 @@
+// Copyright ©2018 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestLstatIfPossible(t *testing.T) {
+	wd, _ := os.Getwd()
+	defer func() {
+		os.Chdir(wd)
+	}()
+
+	osFs := &OsFs{}
+
+	workDir, err := TempDir(osFs, "", "afero-lstate")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		osFs.RemoveAll(workDir)
+	}()
+
+	memWorkDir := "/lstate"
+
+	memFs := NewMemMapFs()
+	overlayFs1 := &CopyOnWriteFs{base: osFs, layer: memFs}
+	overlayFs2 := &CopyOnWriteFs{base: memFs, layer: osFs}
+	overlayFsMemOnly := &CopyOnWriteFs{base: memFs, layer: NewMemMapFs()}
+	basePathFs := &BasePathFs{source: osFs, path: workDir}
+	basePathFsMem := &BasePathFs{source: memFs, path: memWorkDir}
+	roFs := &ReadOnlyFs{source: osFs}
+	roFsMem := &ReadOnlyFs{source: memFs}
+
+	pathFileMem := filepath.Join(memWorkDir, "aferom.txt")
+
+	WriteFile(osFs, filepath.Join(workDir, "afero.txt"), []byte("Hi, Afero!"), 0777)
+	WriteFile(memFs, filepath.Join(pathFileMem), []byte("Hi, Afero!"), 0777)
+
+	os.Chdir(workDir)
+	if err := os.Symlink("afero.txt", "symafero.txt"); err != nil {
+		t.Fatal(err)
+	}
+
+	pathFile := filepath.Join(workDir, "afero.txt")
+	pathSymlink := filepath.Join(workDir, "symafero.txt")
+
+	checkLstat := func(l Lstater, name string, shouldLstat bool) os.FileInfo {
+		statFile, isLstat, err := l.LstatIfPossible(name)
+		if err != nil {
+			t.Fatalf("Lstat check failed: %s", err)
+		}
+		if isLstat != shouldLstat {
+			t.Fatalf("Lstat status was %t for %s", isLstat, name)
+		}
+		return statFile
+	}
+
+	testLstat := func(l Lstater, pathFile, pathSymlink string) {
+		shouldLstat := pathSymlink != ""
+		statRegular := checkLstat(l, pathFile, shouldLstat)
+		statSymlink := checkLstat(l, pathSymlink, shouldLstat)
+		if statRegular == nil || statSymlink == nil {
+			t.Fatal("got nil FileInfo")
+		}
+
+		symSym := statSymlink.Mode()&os.ModeSymlink == os.ModeSymlink
+		if symSym == (pathSymlink == "") {
+			t.Fatal("expected the FileInfo to describe the symlink")
+		}
+
+		_, _, err := l.LstatIfPossible("this-should-not-exist.txt")
+		if err == nil || !os.IsNotExist(err) {
+			t.Fatalf("expected file to not exist, got %s", err)
+		}
+	}
+
+	testLstat(osFs, pathFile, pathSymlink)
+	testLstat(overlayFs1, pathFile, pathSymlink)
+	testLstat(overlayFs2, pathFile, pathSymlink)
+	testLstat(basePathFs, "afero.txt", "symafero.txt")
+	testLstat(overlayFsMemOnly, pathFileMem, "")
+	testLstat(basePathFsMem, "aferom.txt", "")
+	testLstat(roFs, pathFile, pathSymlink)
+	testLstat(roFsMem, pathFileMem, "")
+}
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 000000000..c18a87fb7
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+	if !hasMeta(pattern) {
+		// Lstat not supported by all filesystems.
+		if _, err = lstatIfPossible(fs, pattern); err != nil {
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := filepath.Split(pattern)
+	switch dir {
+	case "":
+		dir = "."
+	case string(filepath.Separator):
+		// nothing
+	default:
+		dir = dir[0 : len(dir)-1] // chop off trailing separator
+	}
+
+	if !hasMeta(dir) {
+		return glob(fs, dir, file, nil)
+	}
+
+	var m []string
+	m, err = Glob(fs, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fs, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+	m = matches
+	fi, err := fs.Stat(dir)
+	if err != nil {
+		return
+	}
+	if !fi.IsDir() {
+		return
+	}
+	d, err := fs.Open(dir)
+	if err != nil {
+		return
+	}
+	defer d.Close()
+
+	names, _ := d.Readdirnames(-1)
+	sort.Strings(names)
+
+	for _, n := range names {
+		matched, err := filepath.Match(pattern, n)
+		if err != nil {
+			return m, err
+		}
+		if matched {
+			m = append(m, filepath.Join(dir, n))
+		}
+	}
+	return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+	// TODO(niemeyer): Should other magic characters be added here?
+	return strings.IndexAny(path, "*?[") >= 0
+}
diff --git a/vendor/github.com/spf13/afero/match_test.go b/vendor/github.com/spf13/afero/match_test.go
new file mode 100644
index 000000000..21e1faecd
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match_test.go
@@ -0,0 +1,183 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "runtime" + "testing" +) + +// contains returns true if vector contains the string s. +func contains(vector []string, s string) bool { + for _, elem := range vector { + if elem == s { + return true + } + } + return false +} + +func setupGlobDirRoot(t *testing.T, fs Fs) string { + path := testDir(fs) + setupGlobFiles(t, fs, path) + return path +} + +func setupGlobDirReusePath(t *testing.T, fs Fs, path string) string { + testRegistry[fs] = append(testRegistry[fs], path) + return setupGlobFiles(t, fs, path) +} + +func setupGlobFiles(t *testing.T, fs Fs, path string) string { + testSubDir := filepath.Join(path, "globs", "bobs") + err := fs.MkdirAll(testSubDir, 0700) + if err != nil && !os.IsExist(err) { + t.Fatal(err) + } + + f, err := fs.Create(filepath.Join(testSubDir, "/matcher")) + if err != nil { + t.Fatal(err) + } + f.WriteString("Testfile 1 content") + f.Close() + + f, err = fs.Create(filepath.Join(testSubDir, "/../submatcher")) + if err != nil { + t.Fatal(err) + } + f.WriteString("Testfile 2 content") + f.Close() + + f, err = fs.Create(filepath.Join(testSubDir, "/../../match")) + if err != nil { + t.Fatal(err) + } + f.WriteString("Testfile 3 content") + f.Close() + + return testSubDir +} + +func TestGlob(t *testing.T) { + defer removeAllTestFiles(t) + var testDir string + for i, fs := range Fss { + if i == 0 { + testDir = setupGlobDirRoot(t, fs) + } else { + setupGlobDirReusePath(t, fs, testDir) + } + } + + var globTests = []struct { + pattern, result string + }{ + {testDir + "/globs/bobs/matcher", testDir + "/globs/bobs/matcher"}, + {testDir + "/globs/*/mat?her", testDir + "/globs/bobs/matcher"}, + {testDir + "/globs/bobs/../*", testDir + "/globs/submatcher"}, + {testDir + "/match", testDir + "/match"}, + } + + for _, fs := range Fss { + + for _, tt := range globTests { + pattern := tt.pattern + result := tt.result + if runtime.GOOS == "windows" { + pattern = filepath.Clean(pattern) + result = filepath.Clean(result) + } + matches, err := Glob(fs, pattern) + if err != nil { + t.Errorf("Glob error for %q: %s", pattern, err) + continue + } + if !contains(matches, result) { + t.Errorf("Glob(%#q) = %#v want %v", pattern, matches, result) + } + } + for _, pattern := range []string{"no_match", "../*/no_match"} { + matches, err := Glob(fs, pattern) + if err != nil { + t.Errorf("Glob error for %q: %s", pattern, err) + continue + } + if len(matches) != 0 { + t.Errorf("Glob(%#q) = %#v want []", pattern, matches) + } + } + + } +} + +func TestGlobSymlink(t *testing.T) { + defer removeAllTestFiles(t) + + fs := &OsFs{} + testDir := setupGlobDirRoot(t, fs) + + err := os.Symlink("target", filepath.Join(testDir, "symlink")) + if err != nil { + t.Skipf("skipping on %s", runtime.GOOS) + } + + var globSymlinkTests = []struct { + path, dest string + brokenLink bool + }{ + {"test1", "link1", false}, + {"test2", "link2", true}, + } + + for _, tt := range globSymlinkTests { + path := 
filepath.Join(testDir, tt.path)
+		dest := filepath.Join(testDir, tt.dest)
+		f, err := fs.Create(path)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err := f.Close(); err != nil {
+			t.Fatal(err)
+		}
+		err = os.Symlink(path, dest)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if tt.brokenLink {
+			// Break the symlink.
+			fs.Remove(path)
+		}
+		matches, err := Glob(fs, dest)
+		if err != nil {
+			t.Errorf("GlobSymlink error for %q: %s", dest, err)
+		}
+		if !contains(matches, dest) {
+			t.Errorf("Glob(%#q) = %#v want %v", dest, matches, dest)
+		}
+	}
+}
+
+func TestGlobError(t *testing.T) {
+	for _, fs := range Fss {
+		// "[]" is a malformed pattern; Glob must surface ErrBadPattern for it.
+		_, err := Glob(fs, "[]")
+		if err == nil {
+			t.Error("expected error for bad pattern; got none")
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 000000000..e104013f4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+	Len() int
+	Names() []string
+	Files() []*FileData
+	Add(*FileData)
+	Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+	if d.memDir == nil {
+		d.dir = true
+		d.memDir = &DirMap{}
+	}
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 000000000..03a57ee5b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
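+
+// DirMap (below) is the map-backed implementation of the Dir interface
+// declared in mem/dir.go. A minimal sketch of how the two cooperate, using
+// only names defined in this package (illustrative, not a test):
+//
+//	d := CreateDir("/tmp")        // FileData whose memDir is a &DirMap{}
+//	f := CreateFile("/tmp/a.txt")
+//	AddToMemDir(d, f)             // delegates to DirMap.Add
+//	_ = d.memDir.Names()          // -> ["/tmp/a.txt"]; entries keep full names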
+ +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 000000000..885e55429 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,314 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f 
*File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) 
+ } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/mem/file_test.go b/vendor/github.com/spf13/afero/mem/file_test.go new file mode 100644 index 000000000..5769067a7 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file_test.go @@ -0,0 +1,154 @@ +package mem + +import ( + "testing" + "time" +) + +func TestFileDataNameRace(t *testing.T) { + t.Parallel() + const someName = "someName" + const someOtherName = "someOtherName" + d := FileData{ + name: someName, + } + + if d.Name() != someName { + t.Errorf("Failed to read correct Name, was %v", d.Name()) + } + + ChangeFileName(&d, someOtherName) + if d.Name() != someOtherName { + t.Errorf("Failed to set Name, was %v", d.Name()) + } + + go func() { + ChangeFileName(&d, someName) + }() + + if d.Name() != someName && d.Name() != someOtherName { + t.Errorf("Failed to read either Name, was %v", d.Name()) + } +} + +func TestFileDataModTimeRace(t *testing.T) { + t.Parallel() + someTime := time.Now() + someOtherTime := someTime.Add(1 * time.Minute) + + d := FileData{ + modtime: someTime, + } + + s := FileInfo{ + FileData: &d, + } + + if s.ModTime() != someTime { + t.Errorf("Failed to read correct value, was %v", s.ModTime()) + } + + SetModTime(&d, someOtherTime) + if s.ModTime() != someOtherTime { + t.Errorf("Failed to set ModTime, was %v", s.ModTime()) + } + + go func() { + SetModTime(&d, someTime) + }() + + if s.ModTime() != someTime && s.ModTime() != someOtherTime { + t.Errorf("Failed to read either modtime, was %v", s.ModTime()) + } +} + +func TestFileDataModeRace(t *testing.T) { + t.Parallel() + const someMode = 0777 + const someOtherMode = 0660 + + d := FileData{ + mode: someMode, + } + + s := FileInfo{ + FileData: &d, + } + + if s.Mode() != someMode { + t.Errorf("Failed to read correct value, was %v", s.Mode()) + } + + SetMode(&d, someOtherMode) + if s.Mode() != someOtherMode { + t.Errorf("Failed to set Mode, was %v", s.Mode()) + } + + go func() { + SetMode(&d, someMode) + }() + + if s.Mode() != someMode && s.Mode() != someOtherMode { + t.Errorf("Failed to read either mode, was %v", s.Mode()) + } +} + +func TestFileDataIsDirRace(t *testing.T) { + t.Parallel() + + d := FileData{ + dir: true, + } + + s := FileInfo{ + FileData: &d, + } + + if s.IsDir() != true { + t.Errorf("Failed to read 
correct value, was %v", s.IsDir()) + } + + go func() { + s.Lock() + d.dir = false + s.Unlock() + }() + + //just logging the value to trigger a read: + t.Logf("Value is %v", s.IsDir()) +} + +func TestFileDataSizeRace(t *testing.T) { + t.Parallel() + + const someData = "Hello" + const someOtherDataSize = "Hello World" + + d := FileData{ + data: []byte(someData), + dir: false, + } + + s := FileInfo{ + FileData: &d, + } + + if s.Size() != int64(len(someData)) { + t.Errorf("Failed to read correct value, was %v", s.Size()) + } + + go func() { + s.Lock() + d.data = []byte(someOtherDataSize) + s.Unlock() + }() + + //just logging the value to trigger a read: + t.Logf("Value is %v", s.Size()) + + //Testing the Dir size case + d.dir = true + if s.Size() != int64(42) { + t.Errorf("Failed to read correct value for dir, was %v", s.Size()) + } +} diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 000000000..09498e70f --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? 
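+		// The sync.Once above makes this lazy initialization idempotent, so
+		// the zero value MemMapFs is usable without a constructor. The first
+		// operation, e.g.
+		//
+		//	fs := NewMemMapFs()
+		//	f, _ := fs.Create("/hello.txt")
+		//
+		// lands here to allocate the map and seed the root entry below, which
+		// ensures registerWithParent always finds a parent for top-level files.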
+ m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. + i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + + m.Chmod(name, perm|os.ModeDir) + + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag 
int, perm os.FileMode) (File, error) { + chmod := false + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + m.Chmod(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/vendor/github.com/spf13/afero/memmap_test.go b/vendor/github.com/spf13/afero/memmap_test.go new file mode 100644 index 000000000..47414ab14 --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap_test.go @@ -0,0 +1,451 @@ +package afero + +import ( + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "testing" + "time" +) + +func TestNormalizePath(t *testing.T) { + type test struct { + input string 
+ expected string + } + + data := []test{ + {".", FilePathSeparator}, + {"./", FilePathSeparator}, + {"..", FilePathSeparator}, + {"../", FilePathSeparator}, + {"./..", FilePathSeparator}, + {"./../", FilePathSeparator}, + } + + for i, d := range data { + cpath := normalizePath(d.input) + if d.expected != cpath { + t.Errorf("Test %d failed. Expected %q got %q", i, d.expected, cpath) + } + } +} + +func TestPathErrors(t *testing.T) { + path := filepath.Join(".", "some", "path") + path2 := filepath.Join(".", "different", "path") + fs := NewMemMapFs() + perm := os.FileMode(0755) + + // relevant functions: + // func (m *MemMapFs) Chmod(name string, mode os.FileMode) error + // func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error + // func (m *MemMapFs) Create(name string) (File, error) + // func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error + // func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error + // func (m *MemMapFs) Open(name string) (File, error) + // func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) + // func (m *MemMapFs) Remove(name string) error + // func (m *MemMapFs) Rename(oldname, newname string) error + // func (m *MemMapFs) Stat(name string) (os.FileInfo, error) + + err := fs.Chmod(path, perm) + checkPathError(t, err, "Chmod") + + err = fs.Chtimes(path, time.Now(), time.Now()) + checkPathError(t, err, "Chtimes") + + // fs.Create doesn't return an error + + err = fs.Mkdir(path2, perm) + if err != nil { + t.Error(err) + } + err = fs.Mkdir(path2, perm) + checkPathError(t, err, "Mkdir") + + err = fs.MkdirAll(path2, perm) + if err != nil { + t.Error("MkdirAll:", err) + } + + _, err = fs.Open(path) + checkPathError(t, err, "Open") + + _, err = fs.OpenFile(path, os.O_RDWR, perm) + checkPathError(t, err, "OpenFile") + + err = fs.Remove(path) + checkPathError(t, err, "Remove") + + err = fs.RemoveAll(path) + if err != nil { + t.Error("RemoveAll:", err) + } + + err = fs.Rename(path, path2) + checkPathError(t, err, "Rename") + + _, err = fs.Stat(path) + checkPathError(t, err, "Stat") +} + +func checkPathError(t *testing.T, err error, op string) { + pathErr, ok := err.(*os.PathError) + if !ok { + t.Error(op+":", err, "is not a os.PathError") + return + } + _, ok = pathErr.Err.(*os.PathError) + if ok { + t.Error(op+":", err, "contains another os.PathError") + } +} + +// Ensure Permissions are set on OpenFile/Mkdir/MkdirAll +func TestPermSet(t *testing.T) { + const fileName = "/myFileTest" + const dirPath = "/myDirTest" + const dirPathAll = "/my/path/to/dir" + + const fileMode = os.FileMode(0765) + // directories will also have the directory bit set + const dirMode = fileMode | os.ModeDir + + fs := NewMemMapFs() + + // Test Openfile + f, err := fs.OpenFile(fileName, os.O_CREATE, fileMode) + if err != nil { + t.Errorf("OpenFile Create failed: %s", err) + return + } + f.Close() + + s, err := fs.Stat(fileName) + if err != nil { + t.Errorf("Stat failed: %s", err) + return + } + if s.Mode().String() != fileMode.String() { + t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), fileMode.String()) + return + } + + // Test Mkdir + err = fs.Mkdir(dirPath, dirMode) + if err != nil { + t.Errorf("MkDir Create failed: %s", err) + return + } + s, err = fs.Stat(dirPath) + if err != nil { + t.Errorf("Stat failed: %s", err) + return + } + // sets File + if s.Mode().String() != dirMode.String() { + t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), dirMode.String()) + return + } + + // Test MkdirAll + err = 
fs.MkdirAll(dirPathAll, dirMode) + if err != nil { + t.Errorf("MkDir Create failed: %s", err) + return + } + s, err = fs.Stat(dirPathAll) + if err != nil { + t.Errorf("Stat failed: %s", err) + return + } + if s.Mode().String() != dirMode.String() { + t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), dirMode.String()) + return + } +} + +// Fails if multiple file objects use the same file.at counter in MemMapFs +func TestMultipleOpenFiles(t *testing.T) { + defer removeAllTestFiles(t) + const fileName = "afero-demo2.txt" + + var data = make([][]byte, len(Fss)) + + for i, fs := range Fss { + dir := testDir(fs) + path := filepath.Join(dir, fileName) + fh1, err := fs.Create(path) + if err != nil { + t.Error("fs.Create failed: " + err.Error()) + } + _, err = fh1.Write([]byte("test")) + if err != nil { + t.Error("fh.Write failed: " + err.Error()) + } + _, err = fh1.Seek(0, os.SEEK_SET) + if err != nil { + t.Error(err) + } + + fh2, err := fs.OpenFile(path, os.O_RDWR, 0777) + if err != nil { + t.Error("fs.OpenFile failed: " + err.Error()) + } + _, err = fh2.Seek(0, os.SEEK_END) + if err != nil { + t.Error(err) + } + _, err = fh2.Write([]byte("data")) + if err != nil { + t.Error(err) + } + err = fh2.Close() + if err != nil { + t.Error(err) + } + + _, err = fh1.Write([]byte("data")) + if err != nil { + t.Error(err) + } + err = fh1.Close() + if err != nil { + t.Error(err) + } + // the file now should contain "datadata" + data[i], err = ReadFile(fs, path) + if err != nil { + t.Error(err) + } + } + + for i, fs := range Fss { + if i == 0 { + continue + } + if string(data[0]) != string(data[i]) { + t.Errorf("%s and %s don't behave the same\n"+ + "%s: \"%s\"\n%s: \"%s\"\n", + Fss[0].Name(), fs.Name(), Fss[0].Name(), data[0], fs.Name(), data[i]) + } + } +} + +// Test if file.Write() fails when opened as read only +func TestReadOnly(t *testing.T) { + defer removeAllTestFiles(t) + const fileName = "afero-demo.txt" + + for _, fs := range Fss { + dir := testDir(fs) + path := filepath.Join(dir, fileName) + + f, err := fs.Create(path) + if err != nil { + t.Error(fs.Name()+":", "fs.Create failed: "+err.Error()) + } + _, err = f.Write([]byte("test")) + if err != nil { + t.Error(fs.Name()+":", "Write failed: "+err.Error()) + } + f.Close() + + f, err = fs.Open(path) + if err != nil { + t.Error("fs.Open failed: " + err.Error()) + } + _, err = f.Write([]byte("data")) + if err == nil { + t.Error(fs.Name()+":", "No write error") + } + f.Close() + + f, err = fs.OpenFile(path, os.O_RDONLY, 0644) + if err != nil { + t.Error("fs.Open failed: " + err.Error()) + } + _, err = f.Write([]byte("data")) + if err == nil { + t.Error(fs.Name()+":", "No write error") + } + f.Close() + } +} + +func TestWriteCloseTime(t *testing.T) { + defer removeAllTestFiles(t) + const fileName = "afero-demo.txt" + + for _, fs := range Fss { + dir := testDir(fs) + path := filepath.Join(dir, fileName) + + f, err := fs.Create(path) + if err != nil { + t.Error(fs.Name()+":", "fs.Create failed: "+err.Error()) + } + f.Close() + + f, err = fs.Create(path) + if err != nil { + t.Error(fs.Name()+":", "fs.Create failed: "+err.Error()) + } + fi, err := f.Stat() + if err != nil { + t.Error(fs.Name()+":", "Stat failed: "+err.Error()) + } + timeBefore := fi.ModTime() + + // sorry for the delay, but we have to make sure time advances, + // also on non Un*x systems... 
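+		// (The durations below are conservative guesses at filesystem timestamp
+		// resolution: FAT can be as coarse as two seconds and ext3 stores whole
+		// seconds, so a shorter sleep could leave ModTime unchanged and fail the
+		// assertion at the end of this test for the wrong reason.)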
+ switch runtime.GOOS { + case "windows": + time.Sleep(2 * time.Second) + case "darwin": + time.Sleep(1 * time.Second) + default: // depending on the FS, this may work with < 1 second, on my old ext3 it does not + time.Sleep(1 * time.Second) + } + + _, err = f.Write([]byte("test")) + if err != nil { + t.Error(fs.Name()+":", "Write failed: "+err.Error()) + } + f.Close() + fi, err = fs.Stat(path) + if err != nil { + t.Error(fs.Name()+":", "fs.Stat failed: "+err.Error()) + } + if fi.ModTime().Equal(timeBefore) { + t.Error(fs.Name()+":", "ModTime was not set on Close()") + } + } +} + +// This test should be run with the race detector on: +// go test -race -v -timeout 10s -run TestRacingDeleteAndClose +func TestRacingDeleteAndClose(t *testing.T) { + fs := NewMemMapFs() + pathname := "testfile" + f, err := fs.Create(pathname) + if err != nil { + t.Fatal(err) + } + + in := make(chan bool) + + go func() { + <-in + f.Close() + }() + go func() { + <-in + fs.Remove(pathname) + }() + close(in) +} + +// This test should be run with the race detector on: +// go test -run TestMemFsDataRace -race +func TestMemFsDataRace(t *testing.T) { + const dir = "test_dir" + fs := NewMemMapFs() + + if err := fs.MkdirAll(dir, 0777); err != nil { + t.Fatal(err) + } + + const n = 1000 + done := make(chan struct{}) + + go func() { + defer close(done) + for i := 0; i < n; i++ { + fname := filepath.Join(dir, fmt.Sprintf("%d.txt", i)) + if err := WriteFile(fs, fname, []byte(""), 0777); err != nil { + panic(err) + } + if err := fs.Remove(fname); err != nil { + panic(err) + } + } + }() + +loop: + for { + select { + case <-done: + break loop + default: + _, err := ReadDir(fs, dir) + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestMemFsDirMode(t *testing.T) { + fs := NewMemMapFs() + err := fs.Mkdir("/testDir1", 0644) + if err != nil { + t.Error(err) + } + err = fs.MkdirAll("/sub/testDir2", 0644) + if err != nil { + t.Error(err) + } + info, err := fs.Stat("/testDir1") + if err != nil { + t.Error(err) + } + if !info.IsDir() { + t.Error("should be a directory") + } + if !info.Mode().IsDir() { + t.Error("FileMode is not directory") + } + info, err = fs.Stat("/sub/testDir2") + if err != nil { + t.Error(err) + } + if !info.IsDir() { + t.Error("should be a directory") + } + if !info.Mode().IsDir() { + t.Error("FileMode is not directory") + } +} + +func TestMemFsUnexpectedEOF(t *testing.T) { + t.Parallel() + + fs := NewMemMapFs() + + if err := WriteFile(fs, "file.txt", []byte("abc"), 0777); err != nil { + t.Fatal(err) + } + + f, err := fs.Open("file.txt") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + // Seek beyond the end. + _, err = f.Seek(512, 0) + if err != nil { + t.Fatal(err) + } + + buff := make([]byte, 256) + _, err = io.ReadAtLeast(f, buff, 256) + + if err != io.ErrUnexpectedEOF { + t.Fatal("Expected ErrUnexpectedEOF") + } +} diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 000000000..13cc1b84c --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,101 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). +type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 000000000..18f60a0f6 --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
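+// The sort matters: Readdirnames may return entries in backend-dependent
+// order, and sorting here is what makes Walk (below) visit files in lexical
+// order, so walks over MemMapFs and OsFs can be compared byte for byte in tests.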
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. + +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/path_test.go b/vendor/github.com/spf13/afero/path_test.go new file mode 100644 index 000000000..104a6bcbe --- /dev/null +++ b/vendor/github.com/spf13/afero/path_test.go @@ -0,0 +1,69 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2009 The Go Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
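+
+// TestWalk below cross-checks afero.Walk over every registered backend. For
+// reference, a typical call site looks like this (the root path and printing
+// callback are illustrative only):
+//
+//	err := Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(path, info.IsDir())
+//		return nil
+//	})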
+
+package afero
+
+import (
+	"fmt"
+	"os"
+	"testing"
+)
+
+func TestWalk(t *testing.T) {
+	defer removeAllTestFiles(t)
+	var testDir string
+	for i, fs := range Fss {
+		if i == 0 {
+			testDir = setupTestDirRoot(t, fs)
+		} else {
+			setupTestDirReusePath(t, fs, testDir)
+		}
+	}
+
+	outputs := make([]string, len(Fss))
+	for i, fs := range Fss {
+		walkFn := func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				t.Error("walkFn err:", err)
+			}
+			var size int64
+			if !info.IsDir() {
+				size = info.Size()
+			}
+			outputs[i] += fmt.Sprintln(path, info.Name(), size, info.IsDir(), err)
+			return nil
+		}
+		err := Walk(fs, testDir, walkFn)
+		if err != nil {
+			t.Error(err)
+		}
+	}
+	fail := false
+	for i, o := range outputs {
+		if i == 0 {
+			continue
+		}
+		if o != outputs[i-1] {
+			fail = true
+			break
+		}
+	}
+	if fail {
+		t.Log("Walk outputs not equal!")
+		for i, o := range outputs {
+			t.Log(Fss[i].Name() + "\n" + o)
+		}
+		t.Fail()
+	}
+}
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 000000000..c6376ec37
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,80 @@
+package afero
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+var _ Lstater = (*ReadOnlyFs)(nil)
+
+type ReadOnlyFs struct {
+	source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+	return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+	return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+	return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+	return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+	if lsf, ok := r.source.(Lstater); ok {
+		return lsf.LstatIfPossible(name)
+	}
+	fi, err := r.Stat(name)
+	return fi, false, err
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		return nil, syscall.EPERM
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+	return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+	return nil, syscall.EPERM
+}
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 000000000..9d92dbc05
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,214 @@
+package afero
+
+import (
+	"os"
+	"regexp"
+	"syscall"
+	"time"
+)
+
+// The RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp will be allowed; all others get an ENOENT
+// error ("No such file or directory").
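+//
+// For example, assuming a source filesystem containing /a.txt and /a.html:
+//
+//	fs := NewRegexpFs(NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+//	fs.Open("/a.txt")  // allowed, provided the file exists in the source
+//	fs.Open("/a.html") // rejected with syscall.ENOENT
+//	fs.Open("/")       // allowed: directories always pass the filter
+//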
+type RegexpFs struct {
+	re     *regexp.Regexp
+	source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+	return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+	f  File
+	re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+	if r.re == nil {
+		return nil
+	}
+	if r.re.MatchString(name) {
+		return nil
+	}
+	return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+	return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+	dir, err := IsDir(r.source, oldname)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	if err := r.matchesName(oldname); err != nil {
+		return err
+	}
+	if err := r.matchesName(newname); err != nil {
+		return err
+	}
+	return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+	dir, err := IsDir(r.source, p)
+	if err != nil {
+		return err
+	}
+	if !dir {
+		if err := r.matchesName(p); err != nil {
+			return err
+		}
+	}
+	return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return nil, err
+	}
+	if !dir {
+		if err := r.matchesName(name); err != nil {
+			return nil, err
+		}
+	}
+	f, err := r.source.Open(name)
+	if err != nil {
+		// Propagate the source error instead of silently wrapping a nil file.
+		return nil, err
+	}
+	return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+	return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+	return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+	if err := r.matchesName(name); err != nil {
+		return nil, err
+	}
+	return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+	return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+	return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+	return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+	return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+	return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+	return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+	return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+	var rfi []os.FileInfo
+	rfi, err = f.f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, i := range rfi {
+		if i.IsDir() || f.re.MatchString(i.Name()) {
+			fi = append(fi, i)
+		}
+	}
+	return fi,
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/ro_regexp_test.go b/vendor/github.com/spf13/afero/ro_regexp_test.go new file mode 100644 index 000000000..ef8a35d0d --- /dev/null +++ b/vendor/github.com/spf13/afero/ro_regexp_test.go @@ -0,0 +1,96 @@ +package afero + +import ( + "regexp" + "testing" +) + +func TestFilterReadOnly(t *testing.T) { + fs := &ReadOnlyFs{source: &MemMapFs{}} + _, err := fs.Create("/file.txt") + if err == nil { + t.Errorf("Did not fail to create file") + } + // t.Logf("ERR=%s", err) +} + +func TestFilterReadonlyRemoveAndRead(t *testing.T) { + mfs := &MemMapFs{} + fh, err := mfs.Create("/file.txt") + fh.Write([]byte("content here")) + fh.Close() + + fs := NewReadOnlyFs(mfs) + err = fs.Remove("/file.txt") + if err == nil { + t.Errorf("Did not fail to remove file") + } + + fh, err = fs.Open("/file.txt") + if err != nil { + t.Errorf("Failed to open file: %s", err) + } + + buf := make([]byte, len("content here")) + _, err = fh.Read(buf) + fh.Close() + if string(buf) != "content here" { + t.Errorf("Failed to read file: %s", err) + } + + err = mfs.Remove("/file.txt") + if err != nil { + t.Errorf("Failed to remove file") + } + + fh, err = fs.Open("/file.txt") + if err == nil { + fh.Close() + t.Errorf("File still present") + } +} + +func TestFilterRegexp(t *testing.T) { + fs := NewRegexpFs(&MemMapFs{}, regexp.MustCompile(`\.txt$`)) + _, err := fs.Create("/file.html") + if err == nil { + + t.Errorf("Did not fail to create file") + } + // t.Logf("ERR=%s", err) +} + +func TestFilterRORegexpChain(t *testing.T) { + rofs := &ReadOnlyFs{source: &MemMapFs{}} + fs := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: rofs} + _, err := fs.Create("/file.txt") + if err == nil { + t.Errorf("Did not fail to create file") + } + // t.Logf("ERR=%s", err) +} + +func TestFilterRegexReadDir(t *testing.T) { + mfs := &MemMapFs{} + fs1 := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: mfs} + fs := &RegexpFs{re: regexp.MustCompile(`^a`), source: fs1} + + mfs.MkdirAll("/dir/sub", 0777) + for _, name := range []string{"afile.txt", "afile.html", "bfile.txt"} { + for _, dir := range []string{"/dir/", "/dir/sub/"} { + fh, _ := mfs.Create(dir + name) + fh.Close() + } + } + + files, _ := ReadDir(fs, "/dir") + if len(files) != 2 { // afile.txt, sub + t.Errorf("Got wrong number of files: %#v", files) + } + + f, _ := fs.Open("/dir/sub") + names, _ := f.Readdirnames(-1) + if len(names) != 1 { + t.Errorf("Got wrong number of names: %v", names) + } +} diff --git a/vendor/github.com/spf13/afero/sftpfs/file.go b/vendor/github.com/spf13/afero/sftpfs/file.go new file mode 100644 index 000000000..e4ccb55c0 --- /dev/null +++ b/vendor/github.com/spf13/afero/sftpfs/file.go @@ -0,0 +1,95 @@ +// Copyright © 2015 Jerry Jacobs . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sftpfs
+
+import (
+	"github.com/pkg/sftp"
+	"os"
+)
+
+type File struct {
+	fd *sftp.File
+}
+
+func FileOpen(s *sftp.Client, name string) (*File, error) {
+	fd, err := s.Open(name)
+	if err != nil {
+		return &File{}, err
+	}
+	return &File{fd: fd}, nil
+}
+
+func FileCreate(s *sftp.Client, name string) (*File, error) {
+	fd, err := s.Create(name)
+	if err != nil {
+		return &File{}, err
+	}
+	return &File{fd: fd}, nil
+}
+
+func (f *File) Close() error {
+	return f.fd.Close()
+}
+
+func (f *File) Name() string {
+	return f.fd.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+	return f.fd.Stat()
+}
+
+func (f *File) Sync() error {
+	return nil
+}
+
+func (f *File) Truncate(size int64) error {
+	return f.fd.Truncate(size)
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+	return f.fd.Read(b)
+}
+
+// TODO: not yet implemented; silently returns 0, nil.
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+	return 0, nil
+}
+
+// TODO: not yet implemented; silently returns nil, nil.
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+	return nil, nil
+}
+
+// TODO: not yet implemented; silently returns nil, nil.
+func (f *File) Readdirnames(n int) (names []string, err error) {
+	return nil, nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+	return f.fd.Seek(offset, whence)
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+	return f.fd.Write(b)
+}
+
+// TODO: not yet implemented; silently returns 0, nil.
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+	return 0, nil
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+	return f.fd.Write([]byte(s))
+}
diff --git a/vendor/github.com/spf13/afero/sftpfs/sftp.go b/vendor/github.com/spf13/afero/sftpfs/sftp.go
new file mode 100644
index 000000000..28721da76
--- /dev/null
+++ b/vendor/github.com/spf13/afero/sftpfs/sftp.go
@@ -0,0 +1,129 @@
+// Copyright © 2015 Jerry Jacobs.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sftpfs
+
+import (
+	"os"
+	"time"
+
+	"github.com/pkg/sftp"
+	"github.com/spf13/afero"
+)
+
+// Fs is an afero.Fs implementation that uses functions provided by the sftp package.
+//
+// For details in any method, check the documentation of the sftp package
+// (github.com/pkg/sftp).
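+//
+// A minimal sketch of wiring the backend up; the dial parameters are
+// illustrative and error handling is elided:
+//
+//	conn, _ := ssh.Dial("tcp", "example.com:22", sshConfig)
+//	client, _ := sftp.NewClient(conn)
+//	fs := New(client)
+//	f, _ := fs.Create("/tmp/hello.txt")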
+type Fs struct { + client *sftp.Client +} + +func New(client *sftp.Client) afero.Fs { + return &Fs{client: client} +} + +func (s Fs) Name() string { return "sftpfs" } + +func (s Fs) Create(name string) (afero.File, error) { + return FileCreate(s.client, name) +} + +func (s Fs) Mkdir(name string, perm os.FileMode) error { + err := s.client.Mkdir(name) + if err != nil { + return err + } + return s.client.Chmod(name, perm) +} + +func (s Fs) MkdirAll(path string, perm os.FileMode) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := s.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return err + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = s.MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = s.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := s.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +func (s Fs) Open(name string) (afero.File, error) { + return FileOpen(s.client, name) +} + +func (s Fs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + return nil, nil +} + +func (s Fs) Remove(name string) error { + return s.client.Remove(name) +} + +func (s Fs) RemoveAll(path string) error { + // TODO have a look at os.RemoveAll + // https://github.com/golang/go/blob/master/src/os/path.go#L66 + return nil +} + +func (s Fs) Rename(oldname, newname string) error { + return s.client.Rename(oldname, newname) +} + +func (s Fs) Stat(name string) (os.FileInfo, error) { + return s.client.Stat(name) +} + +func (s Fs) Lstat(p string) (os.FileInfo, error) { + return s.client.Lstat(p) +} + +func (s Fs) Chmod(name string, mode os.FileMode) error { + return s.client.Chmod(name, mode) +} + +func (s Fs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return s.client.Chtimes(name, atime, mtime) +} diff --git a/vendor/github.com/spf13/afero/sftpfs/sftp_test_go b/vendor/github.com/spf13/afero/sftpfs/sftp_test_go new file mode 100644 index 000000000..bb00535d8 --- /dev/null +++ b/vendor/github.com/spf13/afero/sftpfs/sftp_test_go @@ -0,0 +1,286 @@ +// Copyright © 2015 Jerry Jacobs . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
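+
+// Note that this file ships as "sftp_test_go" rather than "_test.go", so the
+// Go toolchain never builds or runs it. It sketches a disposable SSH/SFTP
+// server plus client for exercising the backend, and the SftpFs composite
+// literal it constructs further down no longer matches the Fs/New API
+// defined in sftp.go.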
+ +package afero + +import ( + "testing" + "os" + "log" + "fmt" + "net" + "flag" + "time" + "io/ioutil" + "crypto/rsa" + _rand "crypto/rand" + "encoding/pem" + "crypto/x509" + + "golang.org/x/crypto/ssh" + "github.com/pkg/sftp" +) + +type SftpFsContext struct { + sshc *ssh.Client + sshcfg *ssh.ClientConfig + sftpc *sftp.Client +} + +// TODO we only connect with hardcoded user+pass for now +// it should be possible to use $HOME/.ssh/id_rsa to login into the stub sftp server +func SftpConnect(user, password, host string) (*SftpFsContext, error) { +/* + pemBytes, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa") + if err != nil { + return nil,err + } + + signer, err := ssh.ParsePrivateKey(pemBytes) + if err != nil { + return nil,err + } + + sshcfg := &ssh.ClientConfig{ + User: user, + Auth: []ssh.AuthMethod{ + ssh.Password(password), + ssh.PublicKeys(signer), + }, + } +*/ + + sshcfg := &ssh.ClientConfig{ + User: user, + Auth: []ssh.AuthMethod{ + ssh.Password(password), + }, + } + + sshc, err := ssh.Dial("tcp", host, sshcfg) + if err != nil { + return nil,err + } + + sftpc, err := sftp.NewClient(sshc) + if err != nil { + return nil,err + } + + ctx := &SftpFsContext{ + sshc: sshc, + sshcfg: sshcfg, + sftpc: sftpc, + } + + return ctx,nil +} + +func (ctx *SftpFsContext) Disconnect() error { + ctx.sftpc.Close() + ctx.sshc.Close() + return nil +} + +// TODO for such a weird reason rootpath is "." when writing "file1" with afero sftp backend +func RunSftpServer(rootpath string) { + var ( + readOnly bool + debugLevelStr string + debugLevel int + debugStderr bool + rootDir string + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", true, "debug to stderr") + flag.StringVar(&debugLevelStr, "l", "none", "debug level") + flag.StringVar(&rootDir, "root", rootpath, "root directory") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + debugLevel = 1 + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + fmt.Fprintf(debugStream, "Login: %s\n", c.User()) + if c.User() == "test" && string(pass) == "test" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("./test/id_rsa") + if err != nil { + log.Fatal("Failed to load private key", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection", err) + } + fmt.Printf("Listening on %v\n", listener.Addr()) + + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection", err) + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + _, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake", err) + } + fmt.Fprintf(debugStream, "SSH server established\n") + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. 
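+	// Rough shape of the exchange handled by the loop below: the client opens
+	// a "session" channel, then sends a "subsystem" request whose payload names
+	// "sftp"; only after req.Reply(true, nil) is the channel handed to
+	// sftp.NewServer, which speaks the SFTP protocol over it.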
+ for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of an SFTP session, this is "subsystem" + // with a payload string of "sftp" + fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType()) + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType()) + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatal("could not accept channel.", err) + } + fmt.Fprintf(debugStream, "Channel accepted\n") + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "subsystem" request. + go func(in <-chan *ssh.Request) { + for req := range in { + fmt.Fprintf(debugStream, "Request: %v\n", req.Type) + ok := false + switch req.Type { + case "subsystem": + fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:]) + if string(req.Payload[4:]) == "sftp" { + ok = true + } + } + fmt.Fprintf(debugStream, " - accepted: %v\n", ok) + req.Reply(ok, nil) + } + }(requests) + + server, err := sftp.NewServer(channel, channel, debugStream, debugLevel, readOnly, rootpath) + if err != nil { + log.Fatal(err) + } + if err := server.Serve(); err != nil { + log.Fatal("sftp server completed with error:", err) + } + } +} + +// MakeSSHKeyPair make a pair of public and private keys for SSH access. +// Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file. +// Private Key generated is PEM encoded +func MakeSSHKeyPair(bits int, pubKeyPath, privateKeyPath string) error { + privateKey, err := rsa.GenerateKey(_rand.Reader, bits) + if err != nil { + return err + } + + // generate and write private key as PEM + privateKeyFile, err := os.Create(privateKeyPath) + defer privateKeyFile.Close() + if err != nil { + return err + } + + privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)} + if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil { + return err + } + + // generate and write public key + pub, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return err + } + + return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0655) +} + +func TestSftpCreate(t *testing.T) { + os.Mkdir("./test", 0777) + MakeSSHKeyPair(1024, "./test/id_rsa.pub", "./test/id_rsa") + + go RunSftpServer("./test/") + time.Sleep(5 * time.Second) + + ctx, err := SftpConnect("test", "test", "localhost:2022") + if err != nil { + t.Fatal(err) + } + defer ctx.Disconnect() + + var AppFs Fs = SftpFs{ + SftpClient: ctx.sftpc, + } + + AppFs.MkdirAll("test/dir1/dir2/dir3", os.FileMode(0777)) + AppFs.Mkdir("test/foo", os.FileMode(0000)) + AppFs.Chmod("test/foo", os.FileMode(0700)) + AppFs.Mkdir("test/bar", os.FileMode(0777)) + + file, err := AppFs.Create("file1") + if err != nil { + t.Error(err) + } + defer file.Close() + + file.Write([]byte("hello\t")) + file.WriteString("world!\n") + + f1, err := AppFs.Open("file1") + if err != nil { + log.Fatalf("open: %v", err) + } + defer f1.Close() + + b := make([]byte, 100) + + _, err = f1.Read(b) + fmt.Println(string(b)) + + // TODO check here if "hello\tworld\n" is in buffer b +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 000000000..1e78f7d1e --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,305 @@ 
+package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? + _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. 
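+//
+// A nil Merger falls back to defaultUnionMergeDirsFn below, which keeps the
+// overlay's entry whenever a name exists in both layers. A custom merger can
+// apply any policy; a hypothetical sketch that sorts the combined listing
+// (assuming a "sort" import) might be:
+//
+//	var byName DirsMerger = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+//		ofi, err := defaultUnionMergeDirsFn(lofi, bofi)
+//		if err == nil {
+//			sort.Slice(ofi, func(i, j int) bool { return ofi[i].Name() < ofi[j].Name() })
+//		}
+//		return ofi, err
+//	}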
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
+
+var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+	var files = make(map[string]os.FileInfo)
+
+	for _, fi := range lofi {
+		files[fi.Name()] = fi
+	}
+
+	for _, fi := range bofi {
+		if _, exists := files[fi.Name()]; !exists {
+			files[fi.Name()] = fi
+		}
+	}
+
+	rfi := make([]os.FileInfo, len(files))
+
+	i := 0
+	for _, fi := range files {
+		rfi[i] = fi
+		i++
+	}
+
+	return rfi, nil
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlaid directories.
+// Following os.File.Readdir, c <= 0 returns everything that remains, while
+// c > 0 returns at most c entries and reports io.EOF once the merged
+// listing is exhausted.
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+	var merge DirsMerger = f.Merger
+	if merge == nil {
+		merge = defaultUnionMergeDirsFn
+	}
+
+	if f.off == 0 {
+		var lfi []os.FileInfo
+		if f.Layer != nil {
+			lfi, err = f.Layer.Readdir(-1)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		var bfi []os.FileInfo
+		if f.Base != nil {
+			bfi, err = f.Base.Readdir(-1)
+			if err != nil {
+				return nil, err
+			}
+		}
+		merged, err := merge(lfi, bfi)
+		if err != nil {
+			return nil, err
+		}
+		f.files = append(f.files, merged...)
+	}
+	if c <= 0 {
+		return f.files[f.off:], nil
+	}
+	if f.off >= len(f.files) {
+		return nil, io.EOF
+	}
+	// Return the next window of at most c entries, advancing the offset by
+	// the number of entries actually returned.
+	end := f.off + c
+	if end > len(f.files) {
+		end = len(f.files)
+	}
+	ofi = f.files[f.off:end]
+	f.off = end
+	return ofi, nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+	rfi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for _, fi := range rfi {
+		names = append(names, fi.Name())
+	}
+	return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+	if f.Layer != nil {
+		return f.Layer.Stat()
+	}
+	if f.Base != nil {
+		return f.Base.Stat()
+	}
+	return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Sync()
+		if err == nil && f.Base != nil {
+			err = f.Base.Sync()
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Sync()
+	}
+	return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Truncate(s)
+		if err == nil && f.Base != nil {
+			err = f.Base.Truncate(s)
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Truncate(s)
+	}
+	return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+	if f.Layer != nil {
+		n, err = f.Layer.WriteString(s)
+		if err == nil && f.Base != nil {
+			_, err = f.Base.WriteString(s)
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.WriteString(s)
+	}
+	return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+	bfh, err := base.Open(name)
+	if err != nil {
+		return err
+	}
+	defer bfh.Close()
+
+	// First make sure the directory exists
+	exists, err := Exists(layer, filepath.Dir(name))
+	if err != nil {
+		return err
+	}
+	if !exists {
+		err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
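+		// (0777 is further restricted by the process umask on OS-backed
+		// filesystems; the FIXME above presumably questions whether the
+		// base directory's permissions should be copied instead.)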
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create the file on the overlay
+	lfh, err := layer.Create(name)
+	if err != nil {
+		return err
+	}
+	n, err := io.Copy(lfh, bfh)
+	if err != nil {
+		// If anything fails, clean up the file
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+
+	bfi, err := bfh.Stat()
+	if err != nil || bfi.Size() != n {
+		layer.Remove(name)
+		lfh.Close()
+		return syscall.EIO
+	}
+
+	err = lfh.Close()
+	if err != nil {
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+	return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 000000000..4f253f481
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"golang.org/x/text/transform"
+	"golang.org/x/text/unicode/norm"
+)
+
+// FilePathSeparator is the filepath separator defined by filepath.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+	return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // 0777 before umask
+		if err != nil {
+			if !os.IsExist(err) {
+				return err
+			}
+		}
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+// SafeWriteReader is the same as WriteReader but checks whether the
+// file or directory already exists.
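+//
+// A minimal usage sketch (the path and reader are illustrative):
+//
+//	err := SafeWriteReader(fs, "dir/file.txt", strings.NewReader("data"))
+//	// err is non-nil if dir/file.txt already exists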
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+	return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // 0777 before umask
+		if err != nil {
+			return
+		}
+	}
+
+	exists, err := Exists(fs, path)
+	if err != nil {
+		return
+	}
+	if exists {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+	return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash.
+// If subPath is not empty, the directory is created recursively with
+// permission mode 0777 (before umask).
+func GetTempDir(fs Fs, subPath string) string {
+	addSlash := func(p string) string {
+		if FilePathSeparator != p[len(p)-1:] {
+			p = p + FilePathSeparator
+		}
+		return p
+	}
+	dir := addSlash(os.TempDir())
+
+	if subPath != "" {
+		// preserve windows backslash :-(
+		if FilePathSeparator == "\\" {
+			subPath = strings.Replace(subPath, "\\", "____", -1)
+		}
+		dir = dir + UnicodeSanitize(subPath)
+		if FilePathSeparator == "\\" {
+			dir = strings.Replace(dir, "____", "\\", -1)
+		}
+
+		if exists, _ := Exists(fs, dir); exists {
+			return addSlash(dir)
+		}
+
+		err := fs.MkdirAll(dir, 0777)
+		if err != nil {
+			panic(err)
+		}
+		dir = addSlash(dir)
+	}
+	return dir
+}
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+	source := []rune(s)
+	target := make([]rune, 0, len(source))
+
+	for _, r := range source {
+		if unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsMark(r) ||
+			r == '.' ||
+			r == '/' ||
+			r == '\\' ||
+			r == '_' ||
+			r == '-' ||
+			r == '%' ||
+			r == ' ' ||
+			r == '#' {
+			target = append(target, r)
+		}
+	}
+
+	return string(target)
+}
+
+// NeuterAccents transforms characters with accents into their plain forms.
+func NeuterAccents(s string) string {
+	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	result, _, _ := transform.String(t, s)
+
+	return result
+}
+
+func isMn(r rune) bool {
+	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+	return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// FileContainsBytes checks if a file contains the specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// FileContainsAnyBytes checks if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. +func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/github.com/spf13/afero/util_test.go b/vendor/github.com/spf13/afero/util_test.go new file mode 100644 index 000000000..b5852f184 --- /dev/null +++ b/vendor/github.com/spf13/afero/util_test.go @@ -0,0 +1,450 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package afero + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + "time" +) + +var testFS = new(MemMapFs) + +func TestDirExists(t *testing.T) { + type test struct { + input string + expected bool + } + + // First create a couple directories so there is something in the filesystem + //testFS := new(MemMapFs) + testFS.MkdirAll("/foo/bar", 0777) + + data := []test{ + {".", true}, + {"./", true}, + {"..", true}, + {"../", true}, + {"./..", true}, + {"./../", true}, + {"/foo/", true}, + {"/foo", true}, + {"/foo/bar", true}, + {"/foo/bar/", true}, + {"/", true}, + {"/some-really-random-directory-name", false}, + {"/some/really/random/directory/name", false}, + {"./some-really-random-local-directory-name", false}, + {"./some/really/random/local/directory/name", false}, + } + + for i, d := range data { + exists, _ := DirExists(testFS, filepath.FromSlash(d.input)) + if d.expected != exists { + t.Errorf("Test %d %q failed. Expected %t got %t", i, d.input, d.expected, exists) + } + } +} + +func TestIsDir(t *testing.T) { + testFS = new(MemMapFs) + + type test struct { + input string + expected bool + } + data := []test{ + {"./", true}, + {"/", true}, + {"./this-directory-does-not-existi", false}, + {"/this-absolute-directory/does-not-exist", false}, + } + + for i, d := range data { + + exists, _ := IsDir(testFS, d.input) + if d.expected != exists { + t.Errorf("Test %d failed. Expected %t got %t", i, d.expected, exists) + } + } +} + +func TestIsEmpty(t *testing.T) { + testFS = new(MemMapFs) + + zeroSizedFile, _ := createZeroSizedFileInTempDir() + defer deleteFileInTempDir(zeroSizedFile) + nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir() + defer deleteFileInTempDir(nonZeroSizedFile) + emptyDirectory, _ := createEmptyTempDir() + defer deleteTempDir(emptyDirectory) + nonEmptyZeroLengthFilesDirectory, _ := createTempDirWithZeroLengthFiles() + defer deleteTempDir(nonEmptyZeroLengthFilesDirectory) + nonEmptyNonZeroLengthFilesDirectory, _ := createTempDirWithNonZeroLengthFiles() + defer deleteTempDir(nonEmptyNonZeroLengthFilesDirectory) + nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt" + nonExistentDir := os.TempDir() + "/this/direcotry/does/not/exist/" + + fileDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentFile) + dirDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentDir) + + type test struct { + input string + expectedResult bool + expectedErr error + } + + data := []test{ + {zeroSizedFile.Name(), true, nil}, + {nonZeroSizedFile.Name(), false, nil}, + {emptyDirectory, true, nil}, + {nonEmptyZeroLengthFilesDirectory, false, nil}, + {nonEmptyNonZeroLengthFilesDirectory, false, nil}, + {nonExistentFile, false, fileDoesNotExist}, + {nonExistentDir, false, dirDoesNotExist}, + } + for i, d := range data { + exists, err := IsEmpty(testFS, d.input) + if d.expectedResult != exists { + t.Errorf("Test %d %q failed exists. Expected result %t got %t", i, d.input, d.expectedResult, exists) + } + if d.expectedErr != nil { + if d.expectedErr.Error() != err.Error() { + t.Errorf("Test %d failed with err. Expected %q(%#v) got %q(%#v)", i, d.expectedErr, d.expectedErr, err, err) + } + } else { + if d.expectedErr != err { + t.Errorf("Test %d failed. 
Expected error %q(%#v) got %q(%#v)", i, d.expectedErr, d.expectedErr, err, err) + } + } + } +} + +func TestReaderContains(t *testing.T) { + for i, this := range []struct { + v1 string + v2 [][]byte + expect bool + }{ + {"abc", [][]byte{[]byte("a")}, true}, + {"abc", [][]byte{[]byte("b")}, true}, + {"abcdefg", [][]byte{[]byte("efg")}, true}, + {"abc", [][]byte{[]byte("d")}, false}, + {"abc", [][]byte{[]byte("d"), []byte("e")}, false}, + {"abc", [][]byte{[]byte("d"), []byte("a")}, true}, + {"abc", [][]byte{[]byte("b"), []byte("e")}, true}, + {"", nil, false}, + {"", [][]byte{[]byte("a")}, false}, + {"a", [][]byte{[]byte("")}, false}, + {"", [][]byte{[]byte("")}, false}} { + result := readerContainsAny(strings.NewReader(this.v1), this.v2...) + if result != this.expect { + t.Errorf("[%d] readerContains: got %t but expected %t", i, result, this.expect) + } + } + + if readerContainsAny(nil, []byte("a")) { + t.Error("readerContains with nil reader") + } + + if readerContainsAny(nil, nil) { + t.Error("readerContains with nil arguments") + } +} + +func createZeroSizedFileInTempDir() (File, error) { + filePrefix := "_path_test_" + f, e := TempFile(testFS, "", filePrefix) // dir is os.TempDir() + if e != nil { + // if there was an error no file was created. + // => no requirement to delete the file + return nil, e + } + return f, nil +} + +func createNonZeroSizedFileInTempDir() (File, error) { + f, err := createZeroSizedFileInTempDir() + if err != nil { + // no file ?? + } + byteString := []byte("byteString") + err = WriteFile(testFS, f.Name(), byteString, 0644) + if err != nil { + // delete the file + deleteFileInTempDir(f) + return nil, err + } + return f, nil +} + +func deleteFileInTempDir(f File) { + err := testFS.Remove(f.Name()) + if err != nil { + // now what? + } +} + +func createEmptyTempDir() (string, error) { + dirPrefix := "_dir_prefix_" + d, e := TempDir(testFS, "", dirPrefix) // will be in os.TempDir() + if e != nil { + // no directory to delete - it was never created + return "", e + } + return d, nil +} + +func createTempDirWithZeroLengthFiles() (string, error) { + d, dirErr := createEmptyTempDir() + if dirErr != nil { + //now what? + } + filePrefix := "_path_test_" + _, fileErr := TempFile(testFS, d, filePrefix) // dir is os.TempDir() + if fileErr != nil { + // if there was an error no file was created. + // but we need to remove the directory to clean-up + deleteTempDir(d) + return "", fileErr + } + // the dir now has one, zero length file in it + return d, nil + +} + +func createTempDirWithNonZeroLengthFiles() (string, error) { + d, dirErr := createEmptyTempDir() + if dirErr != nil { + //now what? + } + filePrefix := "_path_test_" + f, fileErr := TempFile(testFS, d, filePrefix) // dir is os.TempDir() + if fileErr != nil { + // if there was an error no file was created. 
+ // but we need to remove the directory to clean-up + deleteTempDir(d) + return "", fileErr + } + byteString := []byte("byteString") + fileErr = WriteFile(testFS, f.Name(), byteString, 0644) + if fileErr != nil { + // delete the file + deleteFileInTempDir(f) + // also delete the directory + deleteTempDir(d) + return "", fileErr + } + + // the dir now has one, zero length file in it + return d, nil + +} + +func TestExists(t *testing.T) { + zeroSizedFile, _ := createZeroSizedFileInTempDir() + defer deleteFileInTempDir(zeroSizedFile) + nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir() + defer deleteFileInTempDir(nonZeroSizedFile) + emptyDirectory, _ := createEmptyTempDir() + defer deleteTempDir(emptyDirectory) + nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt" + nonExistentDir := os.TempDir() + "/this/direcotry/does/not/exist/" + + type test struct { + input string + expectedResult bool + expectedErr error + } + + data := []test{ + {zeroSizedFile.Name(), true, nil}, + {nonZeroSizedFile.Name(), true, nil}, + {emptyDirectory, true, nil}, + {nonExistentFile, false, nil}, + {nonExistentDir, false, nil}, + } + for i, d := range data { + exists, err := Exists(testFS, d.input) + if d.expectedResult != exists { + t.Errorf("Test %d failed. Expected result %t got %t", i, d.expectedResult, exists) + } + if d.expectedErr != err { + t.Errorf("Test %d failed. Expected %q got %q", i, d.expectedErr, err) + } + } + +} + +func TestSafeWriteToDisk(t *testing.T) { + emptyFile, _ := createZeroSizedFileInTempDir() + defer deleteFileInTempDir(emptyFile) + tmpDir, _ := createEmptyTempDir() + defer deleteTempDir(tmpDir) + + randomString := "This is a random string!" + reader := strings.NewReader(randomString) + + fileExists := fmt.Errorf("%v already exists", emptyFile.Name()) + + type test struct { + filename string + expectedErr error + } + + now := time.Now().Unix() + nowStr := strconv.FormatInt(now, 10) + data := []test{ + {emptyFile.Name(), fileExists}, + {tmpDir + "/" + nowStr, nil}, + } + + for i, d := range data { + e := SafeWriteReader(testFS, d.filename, reader) + if d.expectedErr != nil { + if d.expectedErr.Error() != e.Error() { + t.Errorf("Test %d failed. Expected error %q but got %q", i, d.expectedErr.Error(), e.Error()) + } + } else { + if d.expectedErr != e { + t.Errorf("Test %d failed. Expected %q but got %q", i, d.expectedErr, e) + } + contents, _ := ReadFile(testFS, d.filename) + if randomString != string(contents) { + t.Errorf("Test %d failed. Expected contents %q but got %q", i, randomString, string(contents)) + } + } + reader.Seek(0, 0) + } +} + +func TestWriteToDisk(t *testing.T) { + emptyFile, _ := createZeroSizedFileInTempDir() + defer deleteFileInTempDir(emptyFile) + tmpDir, _ := createEmptyTempDir() + defer deleteTempDir(tmpDir) + + randomString := "This is a random string!" + reader := strings.NewReader(randomString) + + type test struct { + filename string + expectedErr error + } + + now := time.Now().Unix() + nowStr := strconv.FormatInt(now, 10) + data := []test{ + {emptyFile.Name(), nil}, + {tmpDir + "/" + nowStr, nil}, + } + + for i, d := range data { + e := WriteReader(testFS, d.filename, reader) + if d.expectedErr != e { + t.Errorf("Test %d failed. WriteToDisk Error Expected %q but got %q", i, d.expectedErr, e) + } + contents, e := ReadFile(testFS, d.filename) + if e != nil { + t.Errorf("Test %d failed. Could not read file %s. Reason: %s\n", i, d.filename, e) + } + if randomString != string(contents) { + t.Errorf("Test %d failed. 
Expected contents %q but got %q", i, randomString, string(contents)) + } + reader.Seek(0, 0) + } +} + +func TestGetTempDir(t *testing.T) { + dir := os.TempDir() + if FilePathSeparator != dir[len(dir)-1:] { + dir = dir + FilePathSeparator + } + testDir := "hugoTestFolder" + FilePathSeparator + tests := []struct { + input string + expected string + }{ + {"", dir}, + {testDir + " Foo bar ", dir + testDir + " Foo bar " + FilePathSeparator}, + {testDir + "Foo.Bar/foo_Bar-Foo", dir + testDir + "Foo.Bar/foo_Bar-Foo" + FilePathSeparator}, + {testDir + "fOO,bar:foo%bAR", dir + testDir + "fOObarfoo%bAR" + FilePathSeparator}, + {testDir + "FOo/BaR.html", dir + testDir + "FOo/BaR.html" + FilePathSeparator}, + {testDir + "трям/трям", dir + testDir + "трям/трям" + FilePathSeparator}, + {testDir + "은행", dir + testDir + "은행" + FilePathSeparator}, + {testDir + "Банковский кассир", dir + testDir + "Банковский кассир" + FilePathSeparator}, + } + + for _, test := range tests { + output := GetTempDir(new(MemMapFs), test.input) + if output != test.expected { + t.Errorf("Expected %#v, got %#v\n", test.expected, output) + } + } +} + +// This function is very dangerous. Don't use it. +func deleteTempDir(d string) { + err := os.RemoveAll(d) + if err != nil { + // now what? + } +} + +func TestFullBaseFsPath(t *testing.T) { + type dirSpec struct { + Dir1, Dir2, Dir3 string + } + dirSpecs := []dirSpec{ + dirSpec{Dir1: "/", Dir2: "/", Dir3: "/"}, + dirSpec{Dir1: "/", Dir2: "/path2", Dir3: "/"}, + dirSpec{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"}, + dirSpec{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"}, + } + + for _, ds := range dirSpecs { + memFs := NewMemMapFs() + level1Fs := NewBasePathFs(memFs, ds.Dir1) + level2Fs := NewBasePathFs(level1Fs, ds.Dir2) + level3Fs := NewBasePathFs(level2Fs, ds.Dir3) + + type spec struct { + BaseFs Fs + FileName string + ExpectedPath string + } + specs := []spec{ + spec{BaseFs: level3Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, "f.txt")}, + spec{BaseFs: level3Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, "")}, + spec{BaseFs: level2Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "f.txt")}, + spec{BaseFs: level2Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "")}, + spec{BaseFs: level1Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, "f.txt")}, + spec{BaseFs: level1Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, "")}, + } + + for _, s := range specs { + if actualPath := FullBaseFsPath(s.BaseFs.(*BasePathFs), s.FileName); actualPath != s.ExpectedPath { + t.Errorf("Expected \n%s got \n%s", s.ExpectedPath, actualPath) + } + } + } +} diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore new file mode 100644 index 000000000..53053a8ac --- /dev/null +++ b/vendor/github.com/spf13/cast/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.bench diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml new file mode 100644 index 000000000..4da976684 --- /dev/null +++ b/vendor/github.com/spf13/cast/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: required +go: + - 1.7.5 + - 1.8 + - tip +os: + - linux +matrix: + 
allow_failures: + - go: tip + fast_finish: true +script: + - make check diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE new file mode 100644 index 000000000..4527efb9c --- /dev/null +++ b/vendor/github.com/spf13/cast/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile new file mode 100644 index 000000000..7ccf8930b --- /dev/null +++ b/vendor/github.com/spf13/cast/Makefile @@ -0,0 +1,38 @@ +# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html + +.PHONY: check fmt lint test test-race vet test-cover-html help +.DEFAULT_GOAL := help + +check: test-race fmt vet lint ## Run tests and linters + +test: ## Run tests + go test ./... + +test-race: ## Run tests with race detector + go test -race ./... + +fmt: ## Run gofmt linter + @for d in `go list` ; do \ + if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \ + echo "^ improperly formatted go files" && echo && exit 1; \ + fi \ + done + +lint: ## Run golint linter + @for d in `go list` ; do \ + if [ "`golint $$d | tee /dev/stderr`" ]; then \ + echo "^ golint errors!" && echo && exit 1; \ + fi \ + done + +vet: ## Run go vet linter + @if [ "`go vet | tee /dev/stderr`" ]; then \ + echo "^ go vet errors!" && echo && exit 1; \ + fi + +test-cover-html: ## Generate test coverage report + go test -coverprofile=coverage.out -covermode=count + go tool cover -func=coverage.out + +help: + @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md new file mode 100644 index 000000000..e6939397d --- /dev/null +++ b/vendor/github.com/spf13/cast/README.md @@ -0,0 +1,75 @@ +cast +==== +[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) +[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) + +Easy and safe casting from one type to another in Go + +Don’t Panic! ... Cast + +## What is Cast? + +Cast is a library to convert between different go types in a consistent and easy way. 
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t make any attempt to guess what you meant;
+for example, you can only convert a string to an int when it is a string
+representation of an int, such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for metadata.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML, JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides identical methods To_____E. These return the same result as
+the To_____ methods, plus an additional error which tells you whether the
+conversion succeeded. Using these methods you can tell the difference between
+input that genuinely matched the zero value and a conversion that failed and
+returned the zero value.
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+    cast.ToString("mayonegg")         // "mayonegg"
+    cast.ToString(8)                  // "8"
+    cast.ToString(8.31)               // "8.31"
+    cast.ToString([]byte("one time")) // "one time"
+    cast.ToString(nil)                // ""
+
+    var foo interface{} = "one more time"
+    cast.ToString(foo)                // "one more time"
+
+
+### Example ‘ToInt’:
+
+    cast.ToInt(8)      // 8
+    cast.ToInt(8.31)   // 8
+    cast.ToInt("8")    // 8
+    cast.ToInt(true)   // 1
+    cast.ToInt(false)  // 0
+
+    var eight interface{} = 8
+    cast.ToInt(eight)  // 8
+    cast.ToInt(nil)    // 0
+
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 000000000..8b8c208be
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,159 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+	v, _ := ToBoolE(i)
+	return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+	v, _ := ToTimeE(i)
+	return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+	v, _ := ToDurationE(i)
+	return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+	v, _ := ToFloat64E(i)
+	return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+	v, _ := ToFloat32E(i)
+	return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+	v, _ := ToInt64E(i)
+	return v
+}
+
+// ToInt32 casts an interface to an int32 type.
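+// Like every To_____ helper, it has an error-reporting counterpart,
+// ToInt32E, which lets callers distinguish a genuine zero from a failed
+// conversion; for instance:
+//
+//	v, err := ToInt32E("8")    // v == 8, err == nil
+//	v, err = ToInt32E("test")  // v == 0, err != nil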
+func ToInt32(i interface{}) int32 { + v, _ := ToInt32E(i) + return v +} + +// ToInt16 casts an interface to an int16 type. +func ToInt16(i interface{}) int16 { + v, _ := ToInt16E(i) + return v +} + +// ToInt8 casts an interface to an int8 type. +func ToInt8(i interface{}) int8 { + v, _ := ToInt8E(i) + return v +} + +// ToInt casts an interface to an int type. +func ToInt(i interface{}) int { + v, _ := ToIntE(i) + return v +} + +// ToUint casts an interface to a uint type. +func ToUint(i interface{}) uint { + v, _ := ToUintE(i) + return v +} + +// ToUint64 casts an interface to a uint64 type. +func ToUint64(i interface{}) uint64 { + v, _ := ToUint64E(i) + return v +} + +// ToUint32 casts an interface to a uint32 type. +func ToUint32(i interface{}) uint32 { + v, _ := ToUint32E(i) + return v +} + +// ToUint16 casts an interface to a uint16 type. +func ToUint16(i interface{}) uint16 { + v, _ := ToUint16E(i) + return v +} + +// ToUint8 casts an interface to a uint8 type. +func ToUint8(i interface{}) uint8 { + v, _ := ToUint8E(i) + return v +} + +// ToString casts an interface to a string type. +func ToString(i interface{}) string { + v, _ := ToStringE(i) + return v +} + +// ToStringMapString casts an interface to a map[string]string type. +func ToStringMapString(i interface{}) map[string]string { + v, _ := ToStringMapStringE(i) + return v +} + +// ToStringMapStringSlice casts an interface to a map[string][]string type. +func ToStringMapStringSlice(i interface{}) map[string][]string { + v, _ := ToStringMapStringSliceE(i) + return v +} + +// ToStringMapBool casts an interface to a map[string]bool type. +func ToStringMapBool(i interface{}) map[string]bool { + v, _ := ToStringMapBoolE(i) + return v +} + +// ToStringMap casts an interface to a map[string]interface{} type. +func ToStringMap(i interface{}) map[string]interface{} { + v, _ := ToStringMapE(i) + return v +} + +// ToSlice casts an interface to a []interface{} type. +func ToSlice(i interface{}) []interface{} { + v, _ := ToSliceE(i) + return v +} + +// ToBoolSlice casts an interface to a []bool type. +func ToBoolSlice(i interface{}) []bool { + v, _ := ToBoolSliceE(i) + return v +} + +// ToStringSlice casts an interface to a []string type. +func ToStringSlice(i interface{}) []string { + v, _ := ToStringSliceE(i) + return v +} + +// ToIntSlice casts an interface to a []int type. +func ToIntSlice(i interface{}) []int { + v, _ := ToIntSliceE(i) + return v +} + +// ToDurationSlice casts an interface to a []time.Duration type. +func ToDurationSlice(i interface{}) []time.Duration { + v, _ := ToDurationSliceE(i) + return v +} diff --git a/vendor/github.com/spf13/cast/cast_test.go b/vendor/github.com/spf13/cast/cast_test.go new file mode 100644 index 000000000..d9b0b013d --- /dev/null +++ b/vendor/github.com/spf13/cast/cast_test.go @@ -0,0 +1,1208 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
+ +package cast + +import ( + "fmt" + "html/template" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestToUintE(t *testing.T) { + tests := []struct { + input interface{} + expect uint + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {int(-8), 0, true}, + {int8(-8), 0, true}, + {int16(-8), 0, true}, + {int32(-8), 0, true}, + {int64(-8), 0, true}, + {float32(-8.31), 0, true}, + {float64(-8.31), 0, true}, + {"-8", 0, true}, + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToUintE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test: + v = ToUint(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToUint64E(t *testing.T) { + tests := []struct { + input interface{} + expect uint64 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {int(-8), 0, true}, + {int8(-8), 0, true}, + {int16(-8), 0, true}, + {int32(-8), 0, true}, + {int64(-8), 0, true}, + {float32(-8.31), 0, true}, + {float64(-8.31), 0, true}, + {"-8", 0, true}, + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToUint64E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test: + v = ToUint64(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToUint32E(t *testing.T) { + tests := []struct { + input interface{} + expect uint32 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + {int(-8), 0, true}, + {int8(-8), 0, true}, + {int16(-8), 0, true}, + {int32(-8), 0, true}, + {int64(-8), 0, true}, + {float32(-8.31), 0, true}, + {float64(-8.31), 0, true}, + {"-8", 0, true}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToUint32E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test: + v = ToUint32(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToUint16E(t *testing.T) { + tests := []struct { + input interface{} + expect uint16 + 
iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {int(-8), 0, true}, + {int8(-8), 0, true}, + {int16(-8), 0, true}, + {int32(-8), 0, true}, + {int64(-8), 0, true}, + {float32(-8.31), 0, true}, + {float64(-8.31), 0, true}, + {"-8", 0, true}, + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToUint16E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToUint16(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToUint8E(t *testing.T) { + tests := []struct { + input interface{} + expect uint8 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {int(-8), 0, true}, + {int8(-8), 0, true}, + {int16(-8), 0, true}, + {int32(-8), 0, true}, + {int64(-8), 0, true}, + {float32(-8.31), 0, true}, + {float64(-8.31), 0, true}, + {"-8", 0, true}, + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToUint8E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToUint8(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToIntE(t *testing.T) { + tests := []struct { + input interface{} + expect int + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToIntE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToInt(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToInt64E(t *testing.T) { + tests := []struct { + input interface{} + expect int64 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {"test", 0, true}, + 
{testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToInt64E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToInt64(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToInt32E(t *testing.T) { + tests := []struct { + input interface{} + expect int32 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToInt32E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToInt32(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToInt16E(t *testing.T) { + tests := []struct { + input interface{} + expect int16 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToInt16E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToInt16(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToInt8E(t *testing.T) { + tests := []struct { + input interface{} + expect int8 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8, false}, + {float64(8.31), 8, false}, + {true, 1, false}, + {false, 0, false}, + {"8", 8, false}, + {nil, 0, false}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToInt8E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToInt8(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToFloat64E(t *testing.T) { + tests := []struct { + input interface{} + expect float64 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8), 8, false}, + {float64(8.31), 8.31, false}, + {"8", 
8, false}, + {true, 1, false}, + {false, 0, false}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToFloat64E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToFloat64(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToFloat32E(t *testing.T) { + tests := []struct { + input interface{} + expect float32 + iserr bool + }{ + {int(8), 8, false}, + {int8(8), 8, false}, + {int16(8), 8, false}, + {int32(8), 8, false}, + {int64(8), 8, false}, + {uint(8), 8, false}, + {uint8(8), 8, false}, + {uint16(8), 8, false}, + {uint32(8), 8, false}, + {uint64(8), 8, false}, + {float32(8.31), 8.31, false}, + {float64(8.31), 8.31, false}, + {"8", 8, false}, + {true, 1, false}, + {false, 0, false}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToFloat32E(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToFloat32(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToStringE(t *testing.T) { + type Key struct { + k string + } + key := &Key{"foo"} + + tests := []struct { + input interface{} + expect string + iserr bool + }{ + {int(8), "8", false}, + {int8(8), "8", false}, + {int16(8), "8", false}, + {int32(8), "8", false}, + {int64(8), "8", false}, + {uint(8), "8", false}, + {uint8(8), "8", false}, + {uint16(8), "8", false}, + {uint32(8), "8", false}, + {uint64(8), "8", false}, + {float32(8.31), "8.31", false}, + {float64(8.31), "8.31", false}, + {true, "true", false}, + {false, "false", false}, + {nil, "", false}, + {[]byte("one time"), "one time", false}, + {"one more time", "one more time", false}, + {template.HTML("one time"), "one time", false}, + {template.URL("http://somehost.foo"), "http://somehost.foo", false}, + {template.JS("(1+2)"), "(1+2)", false}, + {template.CSS("a"), "a", false}, + {template.HTMLAttr("a"), "a", false}, + // errors + {testing.T{}, "", true}, + {key, "", true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToStringE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToString(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +type foo struct { + val string +} + +func (x foo) String() string { + return x.val +} + +func TestStringerToString(t *testing.T) { + var x foo + x.val = "bar" + assert.Equal(t, "bar", ToString(x)) +} + +type fu struct { + val string +} + +func (x fu) Error() string { + return x.val +} + +func TestErrorToString(t *testing.T) { + var x fu + x.val = "bar" + assert.Equal(t, "bar", ToString(x)) +} + +func TestStringMapStringSliceE(t *testing.T) { + // ToStringMapString inputs/outputs + var stringMapString = map[string]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + var stringMapInterface = map[string]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + var interfaceMapString = map[interface{}]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + var interfaceMapInterface 
= map[interface{}]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + + // ToStringMapStringSlice inputs/outputs + var stringMapStringSlice = map[string][]string{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}} + var stringMapInterfaceSlice = map[string][]interface{}{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}} + var stringMapInterfaceInterfaceSlice = map[string]interface{}{"key 1": []interface{}{"value 1", "value 2", "value 3"}, "key 2": []interface{}{"value 1", "value 2", "value 3"}, "key 3": []interface{}{"value 1", "value 2", "value 3"}} + var stringMapStringSingleSliceFieldsResult = map[string][]string{"key 1": {"value", "1"}, "key 2": {"value", "2"}, "key 3": {"value", "3"}} + var interfaceMapStringSlice = map[interface{}][]string{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}} + var interfaceMapInterfaceSlice = map[interface{}][]interface{}{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}} + + var stringMapStringSliceMultiple = map[string][]string{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}} + var stringMapStringSliceSingle = map[string][]string{"key 1": {"value 1"}, "key 2": {"value 2"}, "key 3": {"value 3"}} + + var stringMapInterface1 = map[string]interface{}{"key 1": []string{"value 1"}, "key 2": []string{"value 2"}} + var stringMapInterfaceResult1 = map[string][]string{"key 1": {"value 1"}, "key 2": {"value 2"}} + + var jsonStringMapString = `{"key 1": "value 1", "key 2": "value 2"}` + var jsonStringMapStringArray = `{"key 1": ["value 1"], "key 2": ["value 2", "value 3"]}` + var jsonStringMapStringArrayResult = map[string][]string{"key 1": {"value 1"}, "key 2": {"value 2", "value 3"}} + + type Key struct { + k string + } + + tests := []struct { + input interface{} + expect map[string][]string + iserr bool + }{ + {stringMapStringSlice, stringMapStringSlice, false}, + {stringMapInterfaceSlice, stringMapStringSlice, false}, + {stringMapInterfaceInterfaceSlice, stringMapStringSlice, false}, + {stringMapStringSliceMultiple, stringMapStringSlice, false}, + {stringMapStringSliceMultiple, stringMapStringSlice, false}, + {stringMapString, stringMapStringSliceSingle, false}, + {stringMapInterface, stringMapStringSliceSingle, false}, + {stringMapInterface1, stringMapInterfaceResult1, false}, + {interfaceMapStringSlice, stringMapStringSlice, false}, + {interfaceMapInterfaceSlice, stringMapStringSlice, false}, + {interfaceMapString, stringMapStringSingleSliceFieldsResult, false}, + {interfaceMapInterface, stringMapStringSingleSliceFieldsResult, false}, + {jsonStringMapStringArray, jsonStringMapStringArrayResult, false}, + + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + {map[interface{}]interface{}{"foo": testing.T{}}, nil, true}, + {map[interface{}]interface{}{Key{"foo"}: "bar"}, nil, true}, // ToStringE(Key{"foo"}) should fail + {jsonStringMapString, nil, true}, + {"", nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToStringMapStringSliceE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + 
assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToStringMapStringSlice(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToStringMapE(t *testing.T) { + tests := []struct { + input interface{} + expect map[string]interface{} + iserr bool + }{ + {map[interface{}]interface{}{"tag": "tags", "group": "groups"}, map[string]interface{}{"tag": "tags", "group": "groups"}, false}, + {map[string]interface{}{"tag": "tags", "group": "groups"}, map[string]interface{}{"tag": "tags", "group": "groups"}, false}, + {`{"tag": "tags", "group": "groups"}`, map[string]interface{}{"tag": "tags", "group": "groups"}, false}, + {`{"tag": "tags", "group": true}`, map[string]interface{}{"tag": "tags", "group": true}, false}, + + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + {"", nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToStringMapE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToStringMap(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToStringMapBoolE(t *testing.T) { + tests := []struct { + input interface{} + expect map[string]bool + iserr bool + }{ + {map[interface{}]interface{}{"v1": true, "v2": false}, map[string]bool{"v1": true, "v2": false}, false}, + {map[string]interface{}{"v1": true, "v2": false}, map[string]bool{"v1": true, "v2": false}, false}, + {map[string]bool{"v1": true, "v2": false}, map[string]bool{"v1": true, "v2": false}, false}, + {`{"v1": true, "v2": false}`, map[string]bool{"v1": true, "v2": false}, false}, + + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + {"", nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToStringMapBoolE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToStringMapBool(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToStringMapStringE(t *testing.T) { + var stringMapString = map[string]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + var stringMapInterface = map[string]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + var interfaceMapString = map[interface{}]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + var interfaceMapInterface = map[interface{}]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"} + var jsonString = `{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}` + var invalidJsonString = `{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"` + var emptyString = "" + + tests := []struct { + input interface{} + expect map[string]string + iserr bool + }{ + {stringMapString, stringMapString, false}, + {stringMapInterface, stringMapString, false}, + {interfaceMapString, stringMapString, false}, + {interfaceMapInterface, stringMapString, false}, + {jsonString, stringMapString, false}, + + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + {invalidJsonString, nil, true}, + {emptyString, nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToStringMapStringE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + 
assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToStringMapString(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToBoolSliceE(t *testing.T) { + tests := []struct { + input interface{} + expect []bool + iserr bool + }{ + {[]bool{true, false, true}, []bool{true, false, true}, false}, + {[]interface{}{true, false, true}, []bool{true, false, true}, false}, + {[]int{1, 0, 1}, []bool{true, false, true}, false}, + {[]string{"true", "false", "true"}, []bool{true, false, true}, false}, + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + {[]string{"foo", "bar"}, nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToBoolSliceE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToBoolSlice(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToIntSliceE(t *testing.T) { + tests := []struct { + input interface{} + expect []int + iserr bool + }{ + {[]int{1, 3}, []int{1, 3}, false}, + {[]interface{}{1.2, 3.2}, []int{1, 3}, false}, + {[]string{"2", "3"}, []int{2, 3}, false}, + {[2]string{"2", "3"}, []int{2, 3}, false}, + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + {[]string{"foo", "bar"}, nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToIntSliceE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToIntSlice(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToSliceE(t *testing.T) { + tests := []struct { + input interface{} + expect []interface{} + iserr bool + }{ + {[]interface{}{1, 3}, []interface{}{1, 3}, false}, + {[]map[string]interface{}{{"k1": 1}, {"k2": 2}}, []interface{}{map[string]interface{}{"k1": 1}, map[string]interface{}{"k2": 2}}, false}, + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToSliceE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToSlice(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToStringSliceE(t *testing.T) { + tests := []struct { + input interface{} + expect []string + iserr bool + }{ + {[]string{"a", "b"}, []string{"a", "b"}, false}, + {[]interface{}{1, 3}, []string{"1", "3"}, false}, + {interface{}(1), []string{"1"}, false}, + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToStringSliceE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToStringSlice(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToDurationSliceE(t *testing.T) { + tests := []struct { + input interface{} + expect []time.Duration + iserr bool + }{ + {[]string{"1s", "1m"}, []time.Duration{time.Second, time.Minute}, false}, + {[]int{1, 2}, []time.Duration{1, 2}, false}, + {[]interface{}{1, 3}, []time.Duration{1, 3}, false}, + {[]time.Duration{1, 3}, 
[]time.Duration{1, 3}, false}, + + // errors + {nil, nil, true}, + {testing.T{}, nil, true}, + {[]string{"invalid"}, nil, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToDurationSliceE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToDurationSlice(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func TestToBoolE(t *testing.T) { + tests := []struct { + input interface{} + expect bool + iserr bool + }{ + {0, false, false}, + {nil, false, false}, + {"false", false, false}, + {"FALSE", false, false}, + {"False", false, false}, + {"f", false, false}, + {"F", false, false}, + {false, false, false}, + + {"true", true, false}, + {"TRUE", true, false}, + {"True", true, false}, + {"t", true, false}, + {"T", true, false}, + {1, true, false}, + {true, true, false}, + {-1, true, false}, + + // errors + {"test", false, true}, + {testing.T{}, false, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToBoolE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToBool(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} + +func BenchmarkTooBool(b *testing.B) { + for i := 0; i < b.N; i++ { + if !ToBool(true) { + b.Fatal("ToBool returned false") + } + } +} + +func TestIndirectPointers(t *testing.T) { + x := 13 + y := &x + z := &y + + assert.Equal(t, ToInt(y), 13) + assert.Equal(t, ToInt(z), 13) +} + +func TestToTimeEE(t *testing.T) { + tests := []struct { + input interface{} + expect time.Time + iserr bool + }{ + {"2009-11-10 23:00:00 +0000 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // Time.String() + {"Tue Nov 10 23:00:00 2009", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // ANSIC + {"Tue Nov 10 23:00:00 UTC 2009", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // UnixDate + {"Tue Nov 10 23:00:00 +0000 2009", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RubyDate + {"10 Nov 09 23:00 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC822 + {"10 Nov 09 23:00 +0000", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC822Z + {"Tuesday, 10-Nov-09 23:00:00 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC850 + {"Tue, 10 Nov 2009 23:00:00 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC1123 + {"Tue, 10 Nov 2009 23:00:00 +0000", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC1123Z + {"2009-11-10T23:00:00Z", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC3339 + {"2009-11-10T23:00:00Z", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC3339Nano + {"11:00PM", time.Date(0, 1, 1, 23, 0, 0, 0, time.UTC), false}, // Kitchen + {"Nov 10 23:00:00", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // Stamp + {"Nov 10 23:00:00.000", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // StampMilli + {"Nov 10 23:00:00.000000", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // StampMicro + {"Nov 10 23:00:00.000000000", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // StampNano + {"2016-03-06 15:28:01-00:00", time.Date(2016, 3, 6, 15, 28, 1, 0, time.UTC), false}, // RFC3339 without T + {"2016-03-06 15:28:01", time.Date(2016, 3, 6, 15, 28, 1, 
0, time.UTC), false}, + {"2016-03-06 15:28:01 -0000", time.Date(2016, 3, 6, 15, 28, 1, 0, time.UTC), false}, + {"2016-03-06 15:28:01 -00:00", time.Date(2016, 3, 6, 15, 28, 1, 0, time.UTC), false}, + {"2006-01-02", time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), false}, + {"02 Jan 2006", time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), false}, + {1472574600, time.Date(2016, 8, 30, 16, 30, 0, 0, time.UTC), false}, + {int(1482597504), time.Date(2016, 12, 24, 16, 38, 24, 0, time.UTC), false}, + {int64(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false}, + {int32(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false}, + {uint(1482597504), time.Date(2016, 12, 24, 16, 38, 24, 0, time.UTC), false}, + {uint64(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false}, + {uint32(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false}, + {time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false}, + // errors + {"2006", time.Time{}, true}, + {testing.T{}, time.Time{}, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToTimeE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v.UTC(), errmsg) + + // Non-E test + v = ToTime(test.input) + assert.Equal(t, test.expect, v.UTC(), errmsg) + } +} + +func TestToDurationE(t *testing.T) { + var td time.Duration = 5 + + tests := []struct { + input interface{} + expect time.Duration + iserr bool + }{ + {time.Duration(5), td, false}, + {int(5), td, false}, + {int64(5), td, false}, + {int32(5), td, false}, + {int16(5), td, false}, + {int8(5), td, false}, + {uint(5), td, false}, + {uint64(5), td, false}, + {uint32(5), td, false}, + {uint16(5), td, false}, + {uint8(5), td, false}, + {float64(5), td, false}, + {float32(5), td, false}, + {string("5"), td, false}, + {string("5ns"), td, false}, + {string("5us"), time.Microsecond * td, false}, + {string("5µs"), time.Microsecond * td, false}, + {string("5ms"), time.Millisecond * td, false}, + {string("5s"), time.Second * td, false}, + {string("5m"), time.Minute * td, false}, + {string("5h"), time.Hour * td, false}, + // errors + {"test", 0, true}, + {testing.T{}, 0, true}, + } + + for i, test := range tests { + errmsg := fmt.Sprintf("i = %d", i) // assert helper message + + v, err := ToDurationE(test.input) + if test.iserr { + assert.Error(t, err, errmsg) + continue + } + + assert.NoError(t, err, errmsg) + assert.Equal(t, test.expect, v, errmsg) + + // Non-E test + v = ToDuration(test.input) + assert.Equal(t, test.expect, v, errmsg) + } +} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go new file mode 100644 index 000000000..4fe192893 --- /dev/null +++ b/vendor/github.com/spf13/cast/caste.go @@ -0,0 +1,1166 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "html/template" + "reflect" + "strconv" + "strings" + "time" +) + +var errNegativeNotAllowed = errors.New("unable to cast negative value") + +// ToTimeE casts an interface to a time.Time type. 
+func ToTimeE(i interface{}) (tim time.Time, err error) { + i = indirect(i) + + switch v := i.(type) { + case time.Time: + return v, nil + case string: + return StringToDate(v) + case int: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + default: + return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) + } +} + +// ToDurationE casts an interface to a time.Duration type. +func ToDurationE(i interface{}) (d time.Duration, err error) { + i = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: + d = time.Duration(ToInt64(s)) + return + case float32, float64: + d = time.Duration(ToFloat64(s)) + return + case string: + if strings.ContainsAny(s, "nsuµmh") { + d, err = time.ParseDuration(s) + } else { + d, err = time.ParseDuration(s + "ns") + } + return + default: + err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) + return + } +} + +// ToBoolE casts an interface to a bool type. +func ToBoolE(i interface{}) (bool, error) { + i = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + if i.(int) != 0 { + return true, nil + } + return false, nil + case string: + return strconv.ParseBool(i.(string)) + default: + return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) + } +} + +// ToFloat64E casts an interface to a float64 type. +func ToFloat64E(i interface{}) (float64, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return s, nil + case float32: + return float64(s), nil + case int: + return float64(s), nil + case int64: + return float64(s), nil + case int32: + return float64(s), nil + case int16: + return float64(s), nil + case int8: + return float64(s), nil + case uint: + return float64(s), nil + case uint64: + return float64(s), nil + case uint32: + return float64(s), nil + case uint16: + return float64(s), nil + case uint8: + return float64(s), nil + case string: + v, err := strconv.ParseFloat(s, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + } +} + +// ToFloat32E casts an interface to a float32 type. +func ToFloat32E(i interface{}) (float32, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return float32(s), nil + case float32: + return s, nil + case int: + return float32(s), nil + case int64: + return float32(s), nil + case int32: + return float32(s), nil + case int16: + return float32(s), nil + case int8: + return float32(s), nil + case uint: + return float32(s), nil + case uint64: + return float32(s), nil + case uint32: + return float32(s), nil + case uint16: + return float32(s), nil + case uint8: + return float32(s), nil + case string: + v, err := strconv.ParseFloat(s, 32) + if err == nil { + return float32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + } +} + +// ToInt64E casts an interface to an int64 type. 
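Taken together, the converters above follow a single contract: each `...E` function returns `(value, error)`, and a plain twin without the suffix (defined in the package's cast.go) swallows the error and returns the zero value. A minimal sketch of call-site behavior, with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Integer inputs are treated as Unix timestamps.
	tm, err := cast.ToTimeE(int64(1234567890))
	fmt.Println(tm.UTC(), err) // 2009-02-13 23:31:30 +0000 UTC <nil>

	// A numeric string with no unit suffix is parsed as nanoseconds...
	d, _ := cast.ToDurationE("5")
	fmt.Println(d) // 5ns
	// ...while recognized unit suffixes go through time.ParseDuration.
	d, _ = cast.ToDurationE("5m")
	fmt.Println(d) // 5m0s

	// ToBoolE accepts the strconv.ParseBool spellings plus non-zero ints.
	b, _ := cast.ToBoolE("T")
	fmt.Println(b) // true

	// Failures only surface through the E variant; ToFloat64 would return 0.
	if _, err := cast.ToFloat64E("not a number"); err != nil {
		fmt.Println(err)
	}
}
```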
+func ToInt64E(i interface{}) (int64, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int64(s), nil + case int64: + return s, nil + case int32: + return int64(s), nil + case int16: + return int64(s), nil + case int8: + return int64(s), nil + case uint: + return int64(s), nil + case uint64: + return int64(s), nil + case uint32: + return int64(s), nil + case uint16: + return int64(s), nil + case uint8: + return int64(s), nil + case float64: + return int64(s), nil + case float32: + return int64(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + } +} + +// ToInt32E casts an interface to an int32 type. +func ToInt32E(i interface{}) (int32, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int32(s), nil + case int64: + return int32(s), nil + case int32: + return s, nil + case int16: + return int32(s), nil + case int8: + return int32(s), nil + case uint: + return int32(s), nil + case uint64: + return int32(s), nil + case uint32: + return int32(s), nil + case uint16: + return int32(s), nil + case uint8: + return int32(s), nil + case float64: + return int32(s), nil + case float32: + return int32(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + } +} + +// ToInt16E casts an interface to an int16 type. +func ToInt16E(i interface{}) (int16, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int16(s), nil + case int64: + return int16(s), nil + case int32: + return int16(s), nil + case int16: + return s, nil + case int8: + return int16(s), nil + case uint: + return int16(s), nil + case uint64: + return int16(s), nil + case uint32: + return int16(s), nil + case uint16: + return int16(s), nil + case uint8: + return int16(s), nil + case float64: + return int16(s), nil + case float32: + return int16(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + } +} + +// ToInt8E casts an interface to an int8 type. 
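All of the signed integer converters parse strings with `strconv.ParseInt(s, 0, 0)`, so base prefixes such as `0x` are honored, while conversions between widths use ordinary Go conversions and therefore truncate silently rather than erroring. A short sketch (again assuming the non-E wrappers from the package's cast.go):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Base 0 parsing: hex and octal prefixes are honored.
	n, _ := cast.ToInt64E("0x10")
	fmt.Println(n) // 16

	// Narrowing wraps like a plain Go conversion; it does not error.
	fmt.Println(cast.ToInt16(70000)) // 4464

	// Floats are truncated toward zero.
	fmt.Println(cast.ToInt32(8.9)) // 8
}
```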
+func ToInt8E(i interface{}) (int8, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int8(s), nil + case int64: + return int8(s), nil + case int32: + return int8(s), nil + case int16: + return int8(s), nil + case int8: + return s, nil + case uint: + return int8(s), nil + case uint64: + return int8(s), nil + case uint32: + return int8(s), nil + case uint16: + return int8(s), nil + case uint8: + return int8(s), nil + case float64: + return int8(s), nil + case float32: + return int8(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + } +} + +// ToIntE casts an interface to an int type. +func ToIntE(i interface{}) (int, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return s, nil + case int64: + return int(s), nil + case int32: + return int(s), nil + case int16: + return int(s), nil + case int8: + return int(s), nil + case uint: + return int(s), nil + case uint64: + return int(s), nil + case uint32: + return int(s), nil + case uint16: + return int(s), nil + case uint8: + return int(s), nil + case float64: + return int(s), nil + case float32: + return int(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + } +} + +// ToUintE casts an interface to a uint type. +func ToUintE(i interface{}) (uint, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 0) + if err == nil { + return uint(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case uint: + return s, nil + case uint64: + return uint(s), nil + case uint32: + return uint(s), nil + case uint16: + return uint(s), nil + case uint8: + return uint(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) + } +} + +// ToUint64E casts an interface to a uint64 type. 
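Note the asymmetry introduced here: unlike the signed family, `ToUintE` and the narrower unsigned converters that follow reject negative signed and floating-point inputs with `errNegativeNotAllowed` instead of letting the conversion wrap. For example:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	if _, err := cast.ToUintE(-1); err != nil {
		fmt.Println(err) // unable to cast negative value
	}

	// The error-swallowing twin falls back to the zero value instead.
	fmt.Println(cast.ToUint(-3.2)) // 0

	u, _ := cast.ToUintE("42")
	fmt.Println(u) // 42
}
```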
+func ToUint64E(i interface{}) (uint64, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case uint: + return uint64(s), nil + case uint64: + return s, nil + case uint32: + return uint64(s), nil + case uint16: + return uint64(s), nil + case uint8: + return uint64(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) + } +} + +// ToUint32E casts an interface to a uint32 type. +func ToUint32E(i interface{}) (uint32, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 32) + if err == nil { + return uint32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case uint: + return uint32(s), nil + case uint64: + return uint32(s), nil + case uint32: + return s, nil + case uint16: + return uint32(s), nil + case uint8: + return uint32(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) + } +} + +// ToUint16E casts an interface to a uint16 type. 
+func ToUint16E(i interface{}) (uint16, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 16) + if err == nil { + return uint16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case uint: + return uint16(s), nil + case uint64: + return uint16(s), nil + case uint32: + return uint16(s), nil + case uint16: + return s, nil + case uint8: + return uint16(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) + } +} + +// ToUint8E casts an interface to a uint type. +func ToUint8E(i interface{}) (uint8, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 8) + if err == nil { + return uint8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case uint: + return uint8(s), nil + case uint64: + return uint8(s), nil + case uint32: + return uint8(s), nil + case uint16: + return uint8(s), nil + case uint8: + return s, nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) + } +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(a interface{}) interface{} { + if a == nil { + return nil + } + if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return a + } + v := reflect.ValueOf(a) + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. 
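Because every converter first passes its input through `indirect`, pointers at any depth to a supported type convert transparently; this is the behavior `TestIndirectPointers` above exercises. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	x := 13
	y := &x
	z := &y // pointer to pointer

	// indirect dereferences until it reaches the underlying int;
	// ToInt is the error-swallowing wrapper from cast.go.
	fmt.Println(cast.ToInt(y)) // 13
	fmt.Println(cast.ToInt(z)) // 13
}
```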
+// indirectToStringerOrError returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer +// or error, +func indirectToStringerOrError(a interface{}) interface{} { + if a == nil { + return nil + } + + var errorType = reflect.TypeOf((*error)(nil)).Elem() + var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + + v := reflect.ValueOf(a) + for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// ToStringE casts an interface to a string type. +func ToStringE(i interface{}) (string, error) { + i = indirectToStringerOrError(i) + + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int64: + return strconv.FormatInt(s, 10), nil + case int32: + return strconv.Itoa(int(s)), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case uint: + return strconv.FormatInt(int64(s), 10), nil + case uint64: + return strconv.FormatInt(int64(s), 10), nil + case uint32: + return strconv.FormatInt(int64(s), 10), nil + case uint16: + return strconv.FormatInt(int64(s), 10), nil + case uint8: + return strconv.FormatInt(int64(s), 10), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) + } +} + +// ToStringMapStringE casts an interface to a map[string]string type. +func ToStringMapStringE(i interface{}) (map[string]string, error) { + var m = map[string]string{} + + switch v := i.(type) { + case map[string]string: + return v, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) + } +} + +// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
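`ToStringE` indirects only until it reaches a `fmt.Stringer` or `error`, so custom `String()` and `Error()` methods take precedence over the raw type switch (the `foo` and `fu` tests earlier depend on this). A sketch with an illustrative `celsius` type:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/spf13/cast"
)

// celsius is an illustrative fmt.Stringer implementation.
type celsius float64

func (c celsius) String() string { return fmt.Sprintf("%.1f C", float64(c)) }

func main() {
	// Stringer implementations are rendered with String().
	fmt.Println(cast.ToString(celsius(21.5))) // 21.5 C

	// Errors are rendered with Error().
	fmt.Println(cast.ToString(errors.New("boom"))) // boom

	// Everything else falls through to the plain type switch.
	fmt.Println(cast.ToString([]byte("raw bytes"))) // raw bytes
}
```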
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { + var m = map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]interface{}: + for k, val := range v { + switch vt := val.(type) { + case []interface{}: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[interface{}][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + value, err := ToStringSliceE(val) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + return m, nil +} + +// ToStringMapBoolE casts an interface to a map[string]bool type. +func ToStringMapBoolE(i interface{}) (map[string]bool, error) { + var m = map[string]bool{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]bool: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) + } +} + +// ToStringMapE casts an interface to a map[string]interface{} type. +func ToStringMapE(i interface{}) (map[string]interface{}, error) { + var m = map[string]interface{}{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = val + } + return m, nil + case map[string]interface{}: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) + } +} + +// ToSliceE casts an interface to a []interface{} type. +func ToSliceE(i interface{}) ([]interface{}, error) { + var s []interface{} + + switch v := i.(type) { + case []interface{}: + return append(s, v...), nil + case []map[string]interface{}: + for _, u := range v { + s = append(s, u) + } + return s, nil + default: + return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) + } +} + +// ToBoolSliceE casts an interface to a []bool type. 
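Each of the map converters accepts either a ready-made Go map, keyed by `string` or `interface{}` as YAML decoders typically produce, or a JSON object in string form, which is decoded via `jsonStringToObject`. For instance:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// interface{}-keyed maps (typical of YAML decoding) get their keys
	// normalized to strings.
	in := map[interface{}]interface{}{"tag": "tags", "group": "groups"}
	m, _ := cast.ToStringMapStringE(in)
	fmt.Println(m["tag"], m["group"]) // tags groups

	// A JSON object in a string is unmarshalled directly.
	j, err := cast.ToStringMapStringE(`{"key": "value"}`)
	fmt.Println(j, err) // map[key:value] <nil>
}
```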
+func ToBoolSliceE(i interface{}) ([]bool, error) { + if i == nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + + switch v := i.(type) { + case []bool: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]bool, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToBoolE(s.Index(j).Interface()) + if err != nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + a[j] = val + } + return a, nil + default: + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } +} + +// ToStringSliceE casts an interface to a []string type. +func ToStringSliceE(i interface{}) ([]string, error) { + var a []string + + switch v := i.(type) { + case []interface{}: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []string: + return v, nil + case string: + return strings.Fields(v), nil + case interface{}: + str, err := ToStringE(v) + if err != nil { + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } + return []string{str}, nil + default: + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } +} + +// ToIntSliceE casts an interface to a []int type. +func ToIntSliceE(i interface{}) ([]int, error) { + if i == nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + + switch v := i.(type) { + case []int: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]int, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToIntE(s.Index(j).Interface()) + if err != nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + a[j] = val + } + return a, nil + default: + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } +} + +// ToDurationSliceE casts an interface to a []time.Duration type. +func ToDurationSliceE(i interface{}) ([]time.Duration, error) { + if i == nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + + switch v := i.(type) { + case []time.Duration: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]time.Duration, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToDurationE(s.Index(j).Interface()) + if err != nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + a[j] = val + } + return a, nil + default: + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } +} + +// StringToDate attempts to parse a string into a time.Time type using a +// predefined list of formats. If no suitable format is found, an error is +// returned. 
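The slice converters fast-path an exact type match and otherwise reflect over any slice or array, converting element by element with the matching scalar `...E` function; `ToStringSliceE` additionally splits a bare string on whitespace. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Arrays work too, via the reflect.Slice/reflect.Array fallback.
	ints, _ := cast.ToIntSliceE([2]string{"2", "3"})
	fmt.Println(ints) // [2 3]

	// A single string is split into whitespace-separated fields.
	fmt.Println(cast.ToStringSlice("a b c")) // [a b c]

	// Elements are converted with ToDurationE, unit suffixes included.
	ds, _ := cast.ToDurationSliceE([]string{"1s", "1m"})
	fmt.Println(ds) // [1s 1m0s]
}
```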
+func StringToDate(s string) (time.Time, error) { + return parseDateWith(s, []string{ + time.RFC3339, + "2006-01-02T15:04:05", // iso8601 without timezone + time.RFC1123Z, + time.RFC1123, + time.RFC822Z, + time.RFC822, + time.RFC850, + time.ANSIC, + time.UnixDate, + time.RubyDate, + "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String() + "2006-01-02", + "02 Jan 2006", + "2006-01-02 15:04:05 -07:00", + "2006-01-02 15:04:05 -0700", + "2006-01-02 15:04:05Z07:00", // RFC3339 without T + "2006-01-02 15:04:05", + time.Kitchen, + time.Stamp, + time.StampMilli, + time.StampMicro, + time.StampNano, + }) +} + +func parseDateWith(s string, dates []string) (d time.Time, e error) { + for _, dateType := range dates { + if d, e = time.Parse(dateType, s); e == nil { + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} + +// jsonStringToObject attempts to unmarshal a string as JSON into +// the object passed as a pointer. +func jsonStringToObject(s string, v interface{}) error { + data := []byte(s) + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/spf13/jwalterweatherman/.gitignore b/vendor/github.com/spf13/jwalterweatherman/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE new file mode 100644 index 000000000..4527efb9c --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md new file mode 100644 index 000000000..d8cfd27ab --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/README.md @@ -0,0 +1,148 @@ +jWalterWeatherman +================= + +Seamless printing to the terminal (stdout) and logging to an io.Writer +(file) that’s as easy to use as fmt.Println.
+ +![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg) +Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422) + +JWW is primarily a wrapper around the excellent standard log library. It +provides a few advantages over using the standard log library alone. + +1. Ready to go out of the box. +2. One library for both printing to the terminal and logging (to files). +3. Really easy to log to either a temp file or a file you specify. + + +I really wanted a very straightforward library that could seamlessly do +the following things. + +1. Replace all the println, printf, etc statements throughout my code with + something more useful +2. Allow the user to easily control what levels are printed to stdout +3. Allow the user to easily control what levels are logged +4. Provide an easy mechanism (like fmt.Println) to print info to the user + which can be easily logged as well +5. Due to 2 & 3, provide an easy verbose mode for output and logs +6. Not have any unnecessary initialization cruft. Just use it. + +# Usage + +## Step 1. Use it +Put calls throughout your source based on the type of feedback. +No initialization or setup needs to happen. Just start calling things. + +Available Loggers are: + + * TRACE + * DEBUG + * INFO + * WARN + * ERROR + * CRITICAL + * FATAL + +Each of these is a logger based on the standard log library and follows the +standard usage. E.g.: + +```go + import ( + jww "github.com/spf13/jwalterweatherman" + ) + + ... + + if err != nil { + + // This is a pretty serious error and the user should know about + // it. It will be printed to the terminal as well as logged under the + // default thresholds. + + jww.ERROR.Println(err) + } + + if err2 != nil { + // This error isn’t going to materially change the behavior of the + // application, but it’s something that may not be what the user + // expects. Under the default thresholds, Warn will be logged, but + // not printed to the terminal. + + jww.WARN.Println(err2) + } + + // Information that’s relevant to what’s happening, but not very + // important for the user. Under the default thresholds this will be + // discarded. + + jww.INFO.Printf("information %q", response) + +``` + +NOTE: You can also use the library in a non-global setting by creating an instance of a Notepad: + +```go +notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) +notepad.WARN.Println("Some warning") +``` + +_Why 7 levels?_ + +Maybe you think that 7 levels are too many for any application... and you +are probably correct. Just because there are seven levels doesn’t mean +that you should be using all 7 levels. Pick the right set for your needs. +Remember they only have to mean something to your project. + +## Step 2. Optionally configure JWW + +Under the default thresholds: + + * Debug, Trace & Info go to /dev/null + * Warn and above are logged (when a log file/io.Writer is provided) + * Error and above are printed to the terminal (stdout) + +### Changing the thresholds + +The threshold can be changed at any time, but will only affect calls that +execute after the change was made. + +This is very useful if your application has a verbose mode. Of course you +can decide what verbose means to you or even have multiple levels of +verbosity.
+ + +```go + import ( + jww "github.com/spf13/jwalterweatherman" + ) + + if Verbose { + jww.SetLogThreshold(jww.LevelTrace) + jww.SetStdoutThreshold(jww.LevelInfo) + } +``` + +Note that JWW's own internal output uses log levels as well, so set the log +level before making any other calls if you want to see what it's up to. + + +### Setting a log file + +JWW can log to any `io.Writer`: + + +```go + + jww.SetLogOutput(customWriter) + +``` + + +# More information + +This is an early release. I’ve been using it for a while and this is the +third interface I’ve tried. I like this one pretty well, but no guarantees +that it won’t change a bit. + +I wrote this for use in [hugo](https://gohugo.io). If you are looking +for a static website engine that’s super fast, please check out Hugo. diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go new file mode 100644 index 000000000..bcb763403 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go @@ -0,0 +1,113 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "io" + "io/ioutil" + "log" + "os" +) + +var ( + TRACE *log.Logger + DEBUG *log.Logger + INFO *log.Logger + WARN *log.Logger + ERROR *log.Logger + CRITICAL *log.Logger + FATAL *log.Logger + + LOG *log.Logger + FEEDBACK *Feedback + + defaultNotepad *Notepad +) + +func reloadDefaultNotepad() { + TRACE = defaultNotepad.TRACE + DEBUG = defaultNotepad.DEBUG + INFO = defaultNotepad.INFO + WARN = defaultNotepad.WARN + ERROR = defaultNotepad.ERROR + CRITICAL = defaultNotepad.CRITICAL + FATAL = defaultNotepad.FATAL + + LOG = defaultNotepad.LOG + FEEDBACK = defaultNotepad.FEEDBACK +} + +func init() { + defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) + reloadDefaultNotepad() +} + +// SetLogThreshold sets the log threshold for the default notepad. Warn by default. +func SetLogThreshold(threshold Threshold) { + defaultNotepad.SetLogThreshold(threshold) + reloadDefaultNotepad() +} + +// SetLogOutput sets the log output for the default notepad. Discarded by default. +func SetLogOutput(handle io.Writer) { + defaultNotepad.SetLogOutput(handle) + reloadDefaultNotepad() +} + +// SetStdoutThreshold sets the standard output threshold for the default notepad. +// Error by default. +func SetStdoutThreshold(threshold Threshold) { + defaultNotepad.SetStdoutThreshold(threshold) + reloadDefaultNotepad() +} + +// SetPrefix sets the prefix for the default logger. Empty by default. +func SetPrefix(prefix string) { + defaultNotepad.SetPrefix(prefix) + reloadDefaultNotepad() +} + +// SetFlags sets the flags for the default logger. "log.Ldate | log.Ltime" by default. +func SetFlags(flags int) { + defaultNotepad.SetFlags(flags) + reloadDefaultNotepad() +} + +// LogThreshold returns the current global log threshold. +func LogThreshold() Threshold { + return defaultNotepad.logThreshold +} + +// StdoutThreshold returns the current global output threshold. +func StdoutThreshold() Threshold { + return defaultNotepad.stdoutThreshold +} + +// GetLogThreshold returns the defined Threshold for the log logger. +func GetLogThreshold() Threshold { + return defaultNotepad.GetLogThreshold() +} + +// GetStdoutThreshold returns the Threshold for the stdout logger.
+func GetStdoutThreshold() Threshold { + return defaultNotepad.GetStdoutThreshold() +} + +// LogCountForLevel returns the number of log invocations for a given threshold. +func LogCountForLevel(l Threshold) uint64 { + return defaultNotepad.LogCountForLevel(l) +} + +// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations +// greater than or equal to a given threshold. +func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { + return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold) +} + +// ResetLogCounters resets the invocation counters for all levels. +func ResetLogCounters() { + defaultNotepad.ResetLogCounters() +} diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad_test.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad_test.go new file mode 100644 index 000000000..2670c8d96 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad_test.go @@ -0,0 +1,102 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "bytes" + "io/ioutil" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestThresholds(t *testing.T) { + SetStdoutThreshold(LevelError) + require.Equal(t, StdoutThreshold(), LevelError) + SetLogThreshold(LevelCritical) + require.Equal(t, LogThreshold(), LevelCritical) + require.NotEqual(t, StdoutThreshold(), LevelCritical) + SetStdoutThreshold(LevelWarn) + require.Equal(t, StdoutThreshold(), LevelWarn) +} + +func TestDefaultLogging(t *testing.T) { + var outputBuf, logBuf bytes.Buffer + + defaultNotepad.logHandle = &logBuf + defaultNotepad.outHandle = &outputBuf + + SetLogThreshold(LevelWarn) + SetStdoutThreshold(LevelError) + + FATAL.Println("fatal err") + CRITICAL.Println("critical err") + ERROR.Println("an error") + WARN.Println("a warning") + INFO.Println("information") + DEBUG.Println("debugging info") + TRACE.Println("trace") + + require.Contains(t, logBuf.String(), "fatal err") + require.Contains(t, logBuf.String(), "critical err") + require.Contains(t, logBuf.String(), "an error") + require.Contains(t, logBuf.String(), "a warning") + require.NotContains(t, logBuf.String(), "information") + require.NotContains(t, logBuf.String(), "debugging info") + require.NotContains(t, logBuf.String(), "trace") + + require.Contains(t, outputBuf.String(), "fatal err") + require.Contains(t, outputBuf.String(), "critical err") + require.Contains(t, outputBuf.String(), "an error") + require.NotContains(t, outputBuf.String(), "a warning") + require.NotContains(t, outputBuf.String(), "information") + require.NotContains(t, outputBuf.String(), "debugging info") + require.NotContains(t, outputBuf.String(), "trace") +} + +func TestLogCounter(t *testing.T) { + defaultNotepad.logHandle = ioutil.Discard + defaultNotepad.outHandle = ioutil.Discard + + SetLogThreshold(LevelTrace) + SetStdoutThreshold(LevelTrace) + + FATAL.Println("fatal err") + CRITICAL.Println("critical err") + WARN.Println("a warning") + WARN.Println("another warning") + INFO.Println("information") + DEBUG.Println("debugging info") + TRACE.Println("trace") + + wg := &sync.WaitGroup{} + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < 10; j++ { + ERROR.Println("error", j) + // check for data races + require.True(t, LogCountForLevel(LevelError) > uint64(j)) + require.True(t, LogCountForLevelsGreaterThanorEqualTo(LevelError) > 
uint64(j)) + } + }() + + } + + wg.Wait() + + require.Equal(t, uint64(1), LogCountForLevel(LevelFatal)) + require.Equal(t, uint64(1), LogCountForLevel(LevelCritical)) + require.Equal(t, uint64(2), LogCountForLevel(LevelWarn)) + require.Equal(t, uint64(1), LogCountForLevel(LevelInfo)) + require.Equal(t, uint64(1), LogCountForLevel(LevelDebug)) + require.Equal(t, uint64(1), LogCountForLevel(LevelTrace)) + require.Equal(t, uint64(100), LogCountForLevel(LevelError)) + require.Equal(t, uint64(102), LogCountForLevelsGreaterThanorEqualTo(LevelError)) +} diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go new file mode 100644 index 000000000..11423ac41 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go @@ -0,0 +1,55 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "sync/atomic" +) + +type logCounter struct { + counter uint64 +} + +func (c *logCounter) incr() { + atomic.AddUint64(&c.counter, 1) +} + +func (c *logCounter) resetCounter() { + atomic.StoreUint64(&c.counter, 0) +} + +func (c *logCounter) getCount() uint64 { + return atomic.LoadUint64(&c.counter) +} + +func (c *logCounter) Write(p []byte) (n int, err error) { + c.incr() + return len(p), nil +} + +// LogCountForLevel returns the number of log invocations for a given threshold. +func (n *Notepad) LogCountForLevel(l Threshold) uint64 { + return n.logCounters[l].getCount() +} + +// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations +// greater than or equal to a given threshold. +func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { + var cnt uint64 + + for i := int(threshold); i < len(n.logCounters); i++ { + cnt += n.LogCountForLevel(Threshold(i)) + } + + return cnt +} + +// ResetLogCounters resets the invocation counters for all levels. +func (n *Notepad) ResetLogCounters() { + for _, np := range n.logCounters { + np.resetCounter() + } +} diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go new file mode 100644 index 000000000..ae5aaf711 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go @@ -0,0 +1,194 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "fmt" + "io" + "log" +) + +type Threshold int + +func (t Threshold) String() string { + return prefixes[t] +} + +const ( + LevelTrace Threshold = iota + LevelDebug + LevelInfo + LevelWarn + LevelError + LevelCritical + LevelFatal +) + +var prefixes map[Threshold]string = map[Threshold]string{ + LevelTrace: "TRACE", + LevelDebug: "DEBUG", + LevelInfo: "INFO", + LevelWarn: "WARN", + LevelError: "ERROR", + LevelCritical: "CRITICAL", + LevelFatal: "FATAL", +} + +// Notepad is where you leave a note! +type Notepad struct { + TRACE *log.Logger + DEBUG *log.Logger + INFO *log.Logger + WARN *log.Logger + ERROR *log.Logger + CRITICAL *log.Logger + FATAL *log.Logger + + LOG *log.Logger + FEEDBACK *Feedback + + loggers [7]**log.Logger + logHandle io.Writer + outHandle io.Writer + logThreshold Threshold + stdoutThreshold Threshold + prefix string + flags int + + // One per Threshold + logCounters [7]*logCounter +} + +// NewNotepad create a new notepad. 
+func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad { + n := &Notepad{} + + n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL} + n.outHandle = outHandle + n.logHandle = logHandle + n.stdoutThreshold = outThreshold + n.logThreshold = logThreshold + + if len(prefix) != 0 { + n.prefix = "[" + prefix + "] " + } else { + n.prefix = "" + } + + n.flags = flags + + n.LOG = log.New(n.logHandle, + "LOG: ", + n.flags) + n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG} + + n.init() + return n +} + +// init creates the loggers for each level depending on the notepad thresholds. +func (n *Notepad) init() { + logAndOut := io.MultiWriter(n.outHandle, n.logHandle) + + for t, logger := range n.loggers { + threshold := Threshold(t) + counter := &logCounter{} + n.logCounters[t] = counter + prefix := n.prefix + threshold.String() + " " + + switch { + case threshold >= n.logThreshold && threshold >= n.stdoutThreshold: + *logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags) + + case threshold >= n.logThreshold: + *logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags) + + case threshold >= n.stdoutThreshold: + *logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags) + + default: + // counter doesn't care about prefix and flags, so don't use them + // for performance. + *logger = log.New(counter, "", 0) + } + } +} + +// SetLogThreshold changes the threshold above which messages are written to the +// log file. +func (n *Notepad) SetLogThreshold(threshold Threshold) { + n.logThreshold = threshold + n.init() +} + +// SetLogOutput changes the file where log messages are written. +func (n *Notepad) SetLogOutput(handle io.Writer) { + n.logHandle = handle + n.init() +} + +// GetLogThreshold returns the defined Threshold for the log logger. +func (n *Notepad) GetLogThreshold() Threshold { + return n.logThreshold +} + +// SetStdoutThreshold changes the threshold above which messages are written to the +// standard output. +func (n *Notepad) SetStdoutThreshold(threshold Threshold) { + n.stdoutThreshold = threshold + n.init() +} + +// GetStdoutThreshold returns the Threshold for the stdout logger. +func (n *Notepad) GetStdoutThreshold() Threshold { + return n.stdoutThreshold +} + +// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between +// brackets at the beginning of the line. An empty prefix won't be displayed at all. +func (n *Notepad) SetPrefix(prefix string) { + if len(prefix) != 0 { + n.prefix = "[" + prefix + "] " + } else { + n.prefix = "" + } + n.init() +} + +// SetFlags chooses which flags the logger will display (after prefix and message +// level). See the package log for more information on this. +func (n *Notepad) SetFlags(flags int) { + n.flags = flags + n.init() +}
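Putting the pieces together: init rebuilds all seven level loggers whenever a threshold, writer, prefix, or flag changes, routing each level to the log writer, the stdout writer, both, or only its counter. A minimal sketch of that lifecycle (the buffer and prefix names below are illustrative, not part of the library):

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	var logBuf bytes.Buffer

	// stdout threshold Error, log threshold Warn.
	n := jww.NewNotepad(jww.LevelError, jww.LevelWarn, os.Stdout, &logBuf, "demo", 0)

	n.WARN.Println("reaches the log buffer only")
	n.ERROR.Println("reaches both writers")

	// Changing a threshold re-runs init(), rewiring the loggers (and,
	// as a side effect, replacing the per-level counters).
	n.SetStdoutThreshold(jww.LevelWarn)
	n.WARN.Println("now printed to stdout as well")

	fmt.Print(logBuf.String())
	fmt.Println("warns since rewiring:", n.LogCountForLevel(jww.LevelWarn)) // 1
}
```

Because init replaces each logCounter, LogCountForLevel only reflects calls made since the last configuration change. The Feedback helper defined next complements these loggers: it pairs a plain stdout logger with the LOG logger so user-facing output is also recorded.

+// Feedback writes plainly to the outHandle while +// logging with the standard extra information (date, file, etc).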
+type Feedback struct { + out *log.Logger + log *log.Logger +} + +func (fb *Feedback) Println(v ...interface{}) { + fb.output(fmt.Sprintln(v...)) +} + +func (fb *Feedback) Printf(format string, v ...interface{}) { + fb.output(fmt.Sprintf(format, v...)) +} + +func (fb *Feedback) Print(v ...interface{}) { + fb.output(fmt.Sprint(v...)) +} + +func (fb *Feedback) output(s string) { + if fb.out != nil { + fb.out.Output(2, s) + } + if fb.log != nil { + fb.log.Output(2, s) + } +} diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad_test.go b/vendor/github.com/spf13/jwalterweatherman/notepad_test.go new file mode 100644 index 000000000..69ad6f8fc --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/notepad_test.go @@ -0,0 +1,50 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNotepad(t *testing.T) { + var logHandle, outHandle bytes.Buffer + + n := NewNotepad(LevelCritical, LevelError, &outHandle, &logHandle, "TestNotePad", 0) + + require.Equal(t, LevelCritical, n.GetStdoutThreshold()) + require.Equal(t, LevelError, n.GetLogThreshold()) + + n.DEBUG.Println("Some debug") + n.ERROR.Println("Some error") + n.CRITICAL.Println("Some critical error") + + require.Contains(t, logHandle.String(), "[TestNotePad] ERROR Some error") + require.NotContains(t, logHandle.String(), "Some debug") + require.NotContains(t, outHandle.String(), "Some error") + require.Contains(t, outHandle.String(), "Some critical error") + + require.Equal(t, n.LogCountForLevel(LevelError), uint64(1)) + require.Equal(t, n.LogCountForLevel(LevelDebug), uint64(1)) + require.Equal(t, n.LogCountForLevel(LevelTrace), uint64(0)) +} + +func TestThresholdString(t *testing.T) { + require.Equal(t, LevelError.String(), "ERROR") + require.Equal(t, LevelTrace.String(), "TRACE") +} + +func BenchmarkLogPrintOnlyToCounter(b *testing.B) { + var logHandle, outHandle bytes.Buffer + n := NewNotepad(LevelCritical, LevelCritical, &outHandle, &logHandle, "TestNotePad", 0) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + n.INFO.Print("Test") + } +} diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore new file mode 100644 index 000000000..352a34a56 --- /dev/null +++ b/vendor/github.com/spf13/viper/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.bench \ No newline at end of file diff --git a/vendor/github.com/spf13/viper/.travis.yml b/vendor/github.com/spf13/viper/.travis.yml new file mode 100644 index 000000000..55960d11b --- /dev/null +++ b/vendor/github.com/spf13/viper/.travis.yml @@ -0,0 +1,28 @@ +go_import_path: github.com/spf13/viper + +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go install ./... + - diff -u <(echo -n) <(gofmt -d .) + - go test -v ./... 
+ +after_success: + - go get -u -d github.com/spf13/hugo + - cd $GOPATH/src/github.com/spf13/hugo && make && ./hugo -s docs && cd - + +sudo: false diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE new file mode 100644 index 000000000..4527efb9c --- /dev/null +++ b/vendor/github.com/spf13/viper/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md new file mode 100644 index 000000000..64bf47435 --- /dev/null +++ b/vendor/github.com/spf13/viper/README.md @@ -0,0 +1,643 @@ +![viper logo](https://cloud.githubusercontent.com/assets/173412/10886745/998df88a-8151-11e5-9448-4736db51020d.png) + +Go configuration with fangs! + +Many Go projects are built using Viper including: + +* [Hugo](http://gohugo.io) +* [EMC RexRay](http://rexray.readthedocs.org/en/stable/) +* [Imgur’s Incus](https://github.com/Imgur/incus) +* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +* [Docker Notary](https://github.com/docker/Notary) +* [BloomApi](https://www.bloomapi.com/) +* [doctl](https://github.com/digitalocean/doctl) +* [Clairctl](https://github.com/jgsqware/clairctl) + +[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper) + + +## What is Viper? + +Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed +to work within an application, and can handle all types of configuration needs +and formats. It supports: + +* setting defaults +* reading from JSON, TOML, YAML, HCL, and Java properties config files +* live watching and re-reading of config files (optional) +* reading from environment variables +* reading from remote config systems (etcd or Consul), and watching changes +* reading from command line flags +* reading from buffer +* setting explicit values + +Viper can be thought of as a registry for all of your applications +configuration needs. + +## Why Viper? 
+
+When building a modern application, you don’t want to worry about
+configuration file formats; you want to focus on building awesome software.
+Viper is here to help with that.
+
+Viper does the following for you:
+
+1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, or Java properties formats.
+2. Provide a mechanism to set default values for your different
+   configuration options.
+3. Provide a mechanism to set override values for options specified through
+   command line flags.
+4. Provide an alias system to easily rename parameters without breaking existing
+   code.
+5. Make it easy to tell the difference between a value the user provided
+   explicitly (via command line or config file) and one that merely equals the default.
+
+Viper uses the following precedence order. Each item takes precedence over the
+item below it:
+
+ * explicit call to Set
+ * flag
+ * env
+ * config
+ * key/value store
+ * default
+
+Viper configuration keys are case insensitive.
+
+## Putting Values into Viper
+
+### Establishing Defaults
+
+A good configuration system will support default values. A default value is not
+required for a key, but it’s useful in the event that a key hasn’t been set via
+config file, environment variable, remote configuration or flag.
+
+Examples:
+
+```go
+viper.SetDefault("ContentDir", "content")
+viper.SetDefault("LayoutDir", "layouts")
+viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
+```
+
+### Reading Config Files
+
+Viper requires minimal configuration so it knows where to look for config files.
+Viper supports JSON, TOML, YAML, HCL, and Java Properties files. Viper can search multiple paths, but
+currently a single Viper instance only supports a single configuration file.
+Viper does not default to any configuration search path, leaving that decision
+to the application.
+
+Here is an example of how to use Viper to search for and read a configuration file.
+None of the specific paths are required, but at least one path should be provided
+where a configuration file is expected.
+
+```go
+viper.SetConfigName("config") // name of config file (without extension)
+viper.AddConfigPath("/etc/appname/")  // path to look for the config file in
+viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
+viper.AddConfigPath(".")              // optionally look for config in the working directory
+err := viper.ReadInConfig() // Find and read the config file
+if err != nil { // Handle errors reading the config file
+	panic(fmt.Errorf("Fatal error config file: %s \n", err))
+}
+```
+
+### Watching and re-reading config files
+
+Viper supports the ability to have your application live read a config file while running.
+
+Gone are the days of needing to restart a server to have a config take effect:
+Viper-powered applications can read an update to a config file while running and
+not miss a beat.
+
+Simply tell the Viper instance to WatchConfig().
+Optionally you can provide a function for Viper to run each time a change occurs.
+
+**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
+
+```go
+viper.WatchConfig()
+viper.OnConfigChange(func(e fsnotify.Event) {
+	fmt.Println("Config file changed:", e.Name)
+})
+```
+
+### Reading Config from io.Reader
+
+Viper predefines many configuration sources such as files, environment
+variables, flags, and remote K/V store, but you are not bound to them. You can
+also implement your own configuration source and feed it to Viper.
+
+```go
+viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
+
+// any approach to require this configuration into your program.
+var yamlExample = []byte(`
+Hacker: true
+name: steve
+hobbies:
+- skateboarding
+- snowboarding
+- go
+clothing:
+  jacket: leather
+  trousers: denim
+age: 35
+eyes : brown
+beard: true
+`)
+
+viper.ReadConfig(bytes.NewBuffer(yamlExample))
+
+viper.Get("name") // this would be "steve"
+```
+
+### Setting Overrides
+
+These could be from a command line flag, or from your own application logic.
+
+```go
+viper.Set("Verbose", true)
+viper.Set("LogFile", LogFile)
+```
+
+### Registering and Using Aliases
+
+Aliases permit a single value to be referenced by multiple keys
+
+```go
+viper.RegisterAlias("loud", "Verbose")
+
+viper.Set("verbose", true) // same result as next line
+viper.Set("loud", true)    // same result as prior line
+
+viper.GetBool("loud")    // true
+viper.GetBool("verbose") // true
+```
+
+### Working with Environment Variables
+
+Viper has full support for environment variables. This enables 12 factor
+applications out of the box. There are four methods that exist to aid working
+with ENV:
+
+ * `AutomaticEnv()`
+ * `BindEnv(string...) : error`
+ * `SetEnvPrefix(string)`
+ * `SetEnvKeyReplacer(string...) *strings.Replacer`
+
+_When working with ENV variables, it’s important to recognize that Viper
+treats ENV variables as case sensitive._
+
+Viper provides a mechanism to try to ensure that ENV variables are unique. By
+using `SetEnvPrefix`, you can tell Viper to add a prefix while reading from
+the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
+prefix.
+
+`BindEnv` takes one or two parameters. The first parameter is the key name, the
+second is the name of the environment variable. The name of the environment
+variable is case sensitive. If the ENV variable name is not provided, then
+Viper will automatically assume that the key name matches the ENV variable name,
+but the ENV variable is IN ALL CAPS. When you explicitly provide the ENV
+variable name, it **does not** automatically add the prefix.
+
+One important thing to recognize when working with ENV variables is that the
+value will be read each time it is accessed. Viper does not fix the value when
+`BindEnv` is called.
+
+`AutomaticEnv` is a powerful helper especially when combined with
+`SetEnvPrefix`. When called, Viper will check for an environment variable any
+time a `viper.Get` request is made. It will apply the following rule: it will
+check for an environment variable with a name matching the key uppercased and
+prefixed with the `EnvPrefix` if set.
+
+`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
+keys to an extent. This is useful if you want to use `-` or something in your
+`Get()` calls, but want your environment variables to use `_` delimiters. An
+example of using it can be found in `viper_test.go`.
+
+#### Env example
+
+```go
+SetEnvPrefix("spf") // will be uppercased automatically
+BindEnv("id")
+
+os.Setenv("SPF_ID", "13") // typically done outside of the app
+
+id := Get("id") // 13
+```
+
+### Working with Flags
+
+Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
+as used in the [Cobra](https://github.com/spf13/cobra) library.
+
+Like `BindEnv`, the value is not set when the binding method is called, but when
+it is accessed. This means you can bind as early as you want, even in an
+`init()` function.
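+
+As a minimal sketch of this lazy behavior, using the `BindPFlag()` method
+described next (the `--port` flag, its values, and this program are
+hypothetical, for illustration only):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
+)
+
+func init() {
+	pflag.Int("port", 1138, "port to listen on")
+	// Binding is cheap and lazy: no value is captured at this point.
+	viper.BindPFlag("port", pflag.Lookup("port"))
+}
+
+func main() {
+	pflag.Parse()                     // e.g. invoked as: app --port 8080
+	fmt.Println(viper.GetInt("port")) // resolved at access time: 8080 (or the 1138 default)
+}
+```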
+
+For individual flags, the `BindPFlag()` method provides this functionality.
+
+Example:
+
+```go
+serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+```
+
+You can also bind an existing set of pflags (pflag.FlagSet):
+
+Example:
+
+```go
+pflag.Int("flagname", 1234, "help message for flagname")
+
+pflag.Parse()
+viper.BindPFlags(pflag.CommandLine)
+
+i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
+```
+
+The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
+the use of other packages that use the [flag](https://golang.org/pkg/flag/)
+package from the standard library. The pflag package can handle the flags
+defined for the flag package by importing these flags. This is accomplished
+by calling a convenience function provided by the pflag package called
+AddGoFlagSet().
+
+Example:
+
+```go
+package main
+
+import (
+	"flag"
+
+	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
+)
+
+func main() {
+
+	// using standard library "flag" package
+	flag.Int("flagname", 1234, "help message for flagname")
+
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	pflag.Parse()
+	viper.BindPFlags(pflag.CommandLine)
+
+	i := viper.GetInt("flagname") // retrieve value from viper
+
+	...
+}
+```
+
+#### Flag interfaces
+
+Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
+
+`FlagValue` represents a single flag. This is a very simple example of how to implement this interface:
+
+```go
+type myFlag struct {}
+func (f myFlag) HasChanged() bool { return false }
+func (f myFlag) Name() string { return "my-flag-name" }
+func (f myFlag) ValueString() string { return "my-flag-value" }
+func (f myFlag) ValueType() string { return "string" }
+```
+
+Once your flag implements this interface, you can simply tell Viper to bind it:
+
+```go
+viper.BindFlagValue("my-flag-name", myFlag{})
+```
+
+`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface:
+
+```go
+type myFlagSet struct {
+	flags []myFlag
+}
+
+func (f myFlagSet) VisitAll(fn func(FlagValue)) {
+	for _, flag := range f.flags {
+		fn(flag)
+	}
+}
+```
+
+Once your flag set implements this interface, you can simply tell Viper to bind it:
+
+```go
+fSet := myFlagSet{
+	flags: []myFlag{myFlag{}, myFlag{}},
+}
+viper.BindFlagValues("my-flags", fSet)
+```
+
+### Remote Key/Value Store Support
+
+To enable remote support in Viper, do a blank import of the `viper/remote`
+package:
+
+`import _ "github.com/spf13/viper/remote"`
+
+Viper will read a config string (as JSON, TOML, YAML or HCL) retrieved from a path
+in a Key/Value store such as etcd or Consul. These values take precedence over
+default values, but are overridden by configuration values retrieved from disk,
+flags, or environment variables.
+
+Viper uses [crypt](https://github.com/xordataexchange/crypt) to retrieve
+configuration from the K/V store, which means that you can store your
+configuration values encrypted and have them automatically decrypted if you have
+the correct gpg keyring. Encryption is optional.
+
+You can use remote configuration in conjunction with local configuration, or
+independently of it.
+
+`crypt` has a command-line helper that you can use to put configurations in your
+K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
+ +```bash +$ go get github.com/xordataexchange/crypt/bin/crypt +$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json +``` + +Confirm that your value was set: + +```bash +$ crypt get -plaintext /config/hugo.json +``` + +See the `crypt` documentation for examples of how to set encrypted values, or +how to use Consul. + +### Remote Key/Value Store Example - Unencrypted + +```go +viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json") +viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop" +err := viper.ReadRemoteConfig() +``` + +### Remote Key/Value Store Example - Encrypted + +```go +viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg") +viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop" +err := viper.ReadRemoteConfig() +``` + +### Watching Changes in etcd - Unencrypted + +```go +// alternatively, you can create a new viper instance. +var runtime_viper = viper.New() + +runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml") +runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop" + +// read from remote config the first time. +err := runtime_viper.ReadRemoteConfig() + +// unmarshal config +runtime_viper.Unmarshal(&runtime_conf) + +// open a goroutine to watch remote changes forever +go func(){ + for { + time.Sleep(time.Second * 5) // delay after each request + + // currently, only tested with etcd support + err := runtime_viper.WatchRemoteConfig() + if err != nil { + log.Errorf("unable to read remote config: %v", err) + continue + } + + // unmarshal new config into our runtime config struct. you can also use channel + // to implement a signal to notify the system of the changes + runtime_viper.Unmarshal(&runtime_conf) + } +}() +``` + +## Getting Values From Viper + +In Viper, there are a few ways to get a value depending on the value’s type. +The following functions and methods exist: + + * `Get(key string) : interface{}` + * `GetBool(key string) : bool` + * `GetFloat64(key string) : float64` + * `GetInt(key string) : int` + * `GetString(key string) : string` + * `GetStringMap(key string) : map[string]interface{}` + * `GetStringMapString(key string) : map[string]string` + * `GetStringSlice(key string) : []string` + * `GetTime(key string) : time.Time` + * `GetDuration(key string) : time.Duration` + * `IsSet(key string) : bool` + +One important thing to recognize is that each Get function will return a zero +value if it’s not found. To check if a given key exists, the `IsSet()` method +has been provided. + +Example: +```go +viper.GetString("logfile") // case-insensitive Setting & Getting +if viper.GetBool("verbose") { + fmt.Println("verbose enabled") +} +``` +### Accessing nested keys + +The accessor methods also accept formatted paths to deeply nested keys. 
For
+example, if the following JSON file is loaded:
+
+```json
+{
+    "host": {
+        "address": "localhost",
+        "port": 5799
+    },
+    "datastore": {
+        "metric": {
+            "host": "127.0.0.1",
+            "port": 3099
+        },
+        "warehouse": {
+            "host": "198.0.0.1",
+            "port": 2112
+        }
+    }
+}
+
+```
+
+Viper can access a nested field by passing a `.` delimited path of keys:
+
+```go
+GetString("datastore.metric.host") // (returns "127.0.0.1")
+```
+
+This obeys the precedence rules established above; the search for the path
+will cascade through the remaining configuration registries until found.
+
+For example, given this configuration file, both `datastore.metric.host` and
+`datastore.metric.port` are already defined (and may be overridden). If in addition
+`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
+
+However, if `datastore.metric` was overridden (by a flag, an environment variable,
+the `Set()` method, …) with an immediate value, then all sub-keys of
+`datastore.metric` become undefined, they are “shadowed” by the higher-priority
+configuration level.
+
+Lastly, if there exists a key that matches the delimited key path, its value
+will be returned instead. E.g.
+
+```json
+{
+    "datastore.metric.host": "0.0.0.0",
+    "host": {
+        "address": "localhost",
+        "port": 5799
+    },
+    "datastore": {
+        "metric": {
+            "host": "127.0.0.1",
+            "port": 3099
+        },
+        "warehouse": {
+            "host": "198.0.0.1",
+            "port": 2112
+        }
+    }
+}
+```
+
+```go
+GetString("datastore.metric.host") // returns "0.0.0.0"
+```
+
+### Extract sub-tree
+
+Extract a sub-tree from Viper.
+
+For example, `viper` represents:
+
+```yaml
+app:
+  cache1:
+    max-items: 100
+    item-size: 64
+  cache2:
+    max-items: 200
+    item-size: 80
+```
+
+After executing:
+
+```go
+subv := viper.Sub("app.cache1")
+```
+
+`subv` represents:
+
+```yaml
+max-items: 100
+item-size: 64
+```
+
+Suppose we have:
+
+```go
+func NewCache(cfg *Viper) *Cache {...}
+```
+
+which creates a cache based on config information formatted as `subv`.
+Now it’s easy to create these two caches separately as:
+
+```go
+cfg1 := viper.Sub("app.cache1")
+cache1 := NewCache(cfg1)
+
+cfg2 := viper.Sub("app.cache2")
+cache2 := NewCache(cfg2)
+```
+
+### Unmarshaling
+
+You also have the option of Unmarshaling all or a specific value to a struct, map,
+etc.
+
+There are two methods to do this:
+
+ * `Unmarshal(rawVal interface{}) : error`
+ * `UnmarshalKey(key string, rawVal interface{}) : error`
+
+Example:
+
+```go
+type config struct {
+	Port    int
+	Name    string
+	PathMap string `mapstructure:"path_map"`
+}
+
+var C config
+
+err := Unmarshal(&C)
+if err != nil {
+	t.Fatalf("unable to decode into struct, %v", err)
+}
+```
+
+## Viper or Vipers?
+
+Viper comes ready to use out of the box. There is no configuration or
+initialization needed to begin using Viper. Since most applications will want
+to use a single central repository for their configuration, the viper package
+provides this. It is similar to a singleton.
+
+All of the examples above demonstrate using viper in its singleton
+style approach.
+
+### Working with multiple vipers
+
+You can also create many different vipers for use in your application. Each will
+have its own unique set of configurations and values. Each can read from a
+different config file, key value store, etc. All of the functions that viper
+package supports are mirrored as methods on a viper.
+
+Example:
+
+```go
+x := viper.New()
+y := viper.New()
+
+x.SetDefault("ContentDir", "content")
+y.SetDefault("ContentDir", "foobar")
+
+//...
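+
+// Each instance resolves keys independently; given the defaults above,
+// x.GetString("ContentDir") == "content" and
+// y.GetString("ContentDir") == "foobar".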
+```
+
+When working with multiple vipers, it is up to the user to keep track of the
+different vipers.
+
+## Q & A
+
+Q: Why not INI files?
+
+A: INI files are pretty awful. There’s no standard format, and they are hard to
+validate. Viper is designed to work with JSON, TOML or YAML files. If someone
+really wants to add this feature, I’d be happy to merge it. It’s easy to specify
+which formats your application will permit.
+
+Q: Why is it called “Viper”?
+
+A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
+to [Cobra](https://github.com/spf13/cobra). While both can operate completely
+independently, together they make a powerful pair to handle much of your
+application foundation needs.
+
+Q: Why is it called “Cobra”?
+
+A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go
new file mode 100644
index 000000000..dd32f4e1c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/flags.go
@@ -0,0 +1,57 @@
+package viper
+
+import "github.com/spf13/pflag"
+
+// FlagValueSet is an interface that users can implement
+// to bind a set of flags to viper.
+type FlagValueSet interface {
+	VisitAll(fn func(FlagValue))
+}
+
+// FlagValue is an interface that users can implement
+// to bind different flags to viper.
+type FlagValue interface {
+	HasChanged() bool
+	Name() string
+	ValueString() string
+	ValueType() string
+}
+
+// pflagValueSet is a wrapper around *pflag.FlagSet
+// that implements FlagValueSet.
+type pflagValueSet struct {
+	flags *pflag.FlagSet
+}
+
+// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
+func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
+	p.flags.VisitAll(func(flag *pflag.Flag) {
+		fn(pflagValue{flag})
+	})
+}
+
+// pflagValue is a wrapper around *pflag.Flag
+// that implements FlagValue.
+type pflagValue struct {
+	flag *pflag.Flag
+}
+
+// HasChanged returns whether the flag has changed or not.
+func (p pflagValue) HasChanged() bool {
+	return p.flag.Changed
+}
+
+// Name returns the name of the flag.
+func (p pflagValue) Name() string {
+	return p.flag.Name
+}
+
+// ValueString returns the value of the flag as a string.
+func (p pflagValue) ValueString() string {
+	return p.flag.Value.String()
+}
+
+// ValueType returns the type of the flag as a string.
+func (p pflagValue) ValueType() string { + return p.flag.Value.Type() +} diff --git a/vendor/github.com/spf13/viper/flags_test.go b/vendor/github.com/spf13/viper/flags_test.go new file mode 100644 index 000000000..0b976b605 --- /dev/null +++ b/vendor/github.com/spf13/viper/flags_test.go @@ -0,0 +1,65 @@ +package viper + +import ( + "testing" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestBindFlagValueSet(t *testing.T) { + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var testValues = map[string]*string{ + "host": nil, + "port": nil, + "endpoint": nil, + } + + var mutatedTestValues = map[string]string{ + "host": "localhost", + "port": "6060", + "endpoint": "/public", + } + + for name := range testValues { + testValues[name] = flagSet.String(name, "", "test") + } + + flagValueSet := pflagValueSet{flagSet} + + err := BindFlagValues(flagValueSet) + if err != nil { + t.Fatalf("error binding flag set, %v", err) + } + + flagSet.VisitAll(func(flag *pflag.Flag) { + flag.Value.Set(mutatedTestValues[flag.Name]) + flag.Changed = true + }) + + for name, expected := range mutatedTestValues { + assert.Equal(t, Get(name), expected) + } +} + +func TestBindFlagValue(t *testing.T) { + var testString = "testing" + var testValue = newStringValue(testString, &testString) + + flag := &pflag.Flag{ + Name: "testflag", + Value: testValue, + Changed: false, + } + + flagValue := pflagValue{flag} + BindFlagValue("testvalue", flagValue) + + assert.Equal(t, testString, Get("testvalue")) + + flag.Value.Set("testing_mutate") + flag.Changed = true //hack for pflag usage + + assert.Equal(t, "testing_mutate", Get("testvalue")) +} diff --git a/vendor/github.com/spf13/viper/overrides_test.go b/vendor/github.com/spf13/viper/overrides_test.go new file mode 100644 index 000000000..dd2aa9b0d --- /dev/null +++ b/vendor/github.com/spf13/viper/overrides_test.go @@ -0,0 +1,173 @@ +package viper + +import ( + "fmt" + "strings" + "testing" + + "github.com/spf13/cast" + "github.com/stretchr/testify/assert" +) + +type layer int + +const ( + defaultLayer layer = iota + 1 + overrideLayer +) + +func TestNestedOverrides(t *testing.T) { + assert := assert.New(t) + var v *Viper + + // Case 0: value overridden by a value + overrideDefault(assert, "tom", 10, "tom", 20) // "tom" is first given 10 as default value, then overridden by 20 + override(assert, "tom", 10, "tom", 20) // "tom" is first given value 10, then overridden by 20 + overrideDefault(assert, "tom.age", 10, "tom.age", 20) + override(assert, "tom.age", 10, "tom.age", 20) + overrideDefault(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20) + override(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20) + + // Case 1: key:value overridden by a value + v = overrideDefault(assert, "tom.age", 10, "tom", "boy") // "tom.age" is first given 10 as default value, then "tom" is overridden by "boy" + assert.Nil(v.Get("tom.age")) // "tom.age" should not exist anymore + v = override(assert, "tom.age", 10, "tom", "boy") + assert.Nil(v.Get("tom.age")) + + // Case 2: value overridden by a key:value + overrideDefault(assert, "tom", "boy", "tom.age", 10) // "tom" is first given "boy" as default value, then "tom" is overridden by map{"age":10} + override(assert, "tom.age", 10, "tom", "boy") + + // Case 3: key:value overridden by a key:value + v = overrideDefault(assert, "tom.size", 4, "tom.age", 10) + assert.Equal(4, v.Get("tom.size")) // value should still be reachable + v = override(assert, "tom.size", 4, "tom.age", 10) + assert.Equal(4, 
v.Get("tom.size")) + deepCheckValue(assert, v, overrideLayer, []string{"tom", "size"}, 4) + + // Case 4: key:value overridden by a map + v = overrideDefault(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10}) // "tom.size" is first given "4" as default value, then "tom" is overridden by map{"age":10} + assert.Equal(4, v.Get("tom.size")) // "tom.size" should still be reachable + assert.Equal(10, v.Get("tom.age")) // new value should be there + deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, 10) // new value should be there + v = override(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10}) + assert.Nil(v.Get("tom.size")) + assert.Equal(10, v.Get("tom.age")) + deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, 10) + + // Case 5: array overridden by a value + overrideDefault(assert, "tom", []int{10, 20}, "tom", 30) + override(assert, "tom", []int{10, 20}, "tom", 30) + overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", 30) + override(assert, "tom.age", []int{10, 20}, "tom.age", 30) + + // Case 6: array overridden by an array + overrideDefault(assert, "tom", []int{10, 20}, "tom", []int{30, 40}) + override(assert, "tom", []int{10, 20}, "tom", []int{30, 40}) + overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40}) + v = override(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40}) + // explicit array merge: + s, ok := v.Get("tom.age").([]int) + if assert.True(ok, "tom[\"age\"] is not a slice") { + v.Set("tom.age", append(s, []int{50, 60}...)) + assert.Equal([]int{30, 40, 50, 60}, v.Get("tom.age")) + deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, []int{30, 40, 50, 60}) + } +} + +func overrideDefault(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { + return overrideFromLayer(defaultLayer, assert, firstPath, firstValue, secondPath, secondValue) +} +func override(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { + return overrideFromLayer(overrideLayer, assert, firstPath, firstValue, secondPath, secondValue) +} + +// overrideFromLayer performs the sequential override and low-level checks. +// +// First assignment is made on layer l for path firstPath with value firstValue, +// the second one on the override layer (i.e., with the Set() function) +// for path secondPath with value secondValue. +// +// firstPath and secondPath can include an arbitrary number of dots to indicate +// a nested element. +// +// After each assignment, the value is checked, retrieved both by its full path +// and by its key sequence (successive maps). 
+func overrideFromLayer(l layer, assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { + v := New() + firstKeys := strings.Split(firstPath, v.keyDelim) + if assert == nil || + len(firstKeys) == 0 || len(firstKeys[0]) == 0 { + return v + } + + // Set and check first value + switch l { + case defaultLayer: + v.SetDefault(firstPath, firstValue) + case overrideLayer: + v.Set(firstPath, firstValue) + default: + return v + } + assert.Equal(firstValue, v.Get(firstPath)) + deepCheckValue(assert, v, l, firstKeys, firstValue) + + // Override and check new value + secondKeys := strings.Split(secondPath, v.keyDelim) + if len(secondKeys) == 0 || len(secondKeys[0]) == 0 { + return v + } + v.Set(secondPath, secondValue) + assert.Equal(secondValue, v.Get(secondPath)) + deepCheckValue(assert, v, overrideLayer, secondKeys, secondValue) + + return v +} + +// deepCheckValue checks that all given keys correspond to a valid path in the +// configuration map of the given layer, and that the final value equals the one given +func deepCheckValue(assert *assert.Assertions, v *Viper, l layer, keys []string, value interface{}) { + if assert == nil || v == nil || + len(keys) == 0 || len(keys[0]) == 0 { + return + } + + // init + var val interface{} + var ms string + switch l { + case defaultLayer: + val = v.defaults + ms = "v.defaults" + case overrideLayer: + val = v.override + ms = "v.override" + } + + // loop through map + var m map[string]interface{} + err := false + for _, k := range keys { + if val == nil { + assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms)) + return + } + + // deep scan of the map to get the final value + switch val.(type) { + case map[interface{}]interface{}: + m = cast.ToStringMap(val) + case map[string]interface{}: + m = val.(map[string]interface{}) + default: + assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms)) + return + } + ms = ms + "[\"" + k + "\"]" + val = m[k] + } + if !err { + assert.Equal(value, val) + } +} diff --git a/vendor/github.com/spf13/viper/remote/remote.go b/vendor/github.com/spf13/viper/remote/remote.go new file mode 100644 index 000000000..810d0702e --- /dev/null +++ b/vendor/github.com/spf13/viper/remote/remote.go @@ -0,0 +1,105 @@ +// Copyright © 2015 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Package remote integrates the remote features of Viper. 
+package remote
+
+import (
+	"bytes"
+	"io"
+	"os"
+
+	"github.com/spf13/viper"
+	crypt "github.com/xordataexchange/crypt/config"
+)
+
+type remoteConfigProvider struct{}
+
+func (rc remoteConfigProvider) Get(rp viper.RemoteProvider) (io.Reader, error) {
+	cm, err := getConfigManager(rp)
+	if err != nil {
+		return nil, err
+	}
+	b, err := cm.Get(rp.Path())
+	if err != nil {
+		return nil, err
+	}
+	return bytes.NewReader(b), nil
+}
+
+func (rc remoteConfigProvider) Watch(rp viper.RemoteProvider) (io.Reader, error) {
+	cm, err := getConfigManager(rp)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := cm.Get(rp.Path())
+	if err != nil {
+		return nil, err
+	}
+
+	return bytes.NewReader(resp), nil
+}
+
+func (rc remoteConfigProvider) WatchChannel(rp viper.RemoteProvider) (<-chan *viper.RemoteResponse, chan bool) {
+	cm, err := getConfigManager(rp)
+	if err != nil {
+		return nil, nil
+	}
+	quit := make(chan bool)
+	quitwc := make(chan bool)
+	viperResponsCh := make(chan *viper.RemoteResponse)
+	cryptoResponseCh := cm.Watch(rp.Path(), quit)
+	// need this function to convert the channel response from crypt.Response to viper.RemoteResponse
+	go func(cr <-chan *crypt.Response, vr chan<- *viper.RemoteResponse, quitwc <-chan bool, quit chan<- bool) {
+		for {
+			select {
+			case <-quitwc:
+				quit <- true
+				return
+			case resp := <-cr:
+				vr <- &viper.RemoteResponse{
+					Error: resp.Error,
+					Value: resp.Value,
+				}
+
+			}
+
+		}
+	}(cryptoResponseCh, viperResponsCh, quitwc, quit)
+
+	return viperResponsCh, quitwc
+}
+
+func getConfigManager(rp viper.RemoteProvider) (crypt.ConfigManager, error) {
+	var cm crypt.ConfigManager
+	var err error
+
+	if rp.SecretKeyring() != "" {
+		kr, err := os.Open(rp.SecretKeyring())
+		if err != nil {
+			return nil, err
+		}
+		defer kr.Close()
+		if rp.Provider() == "etcd" {
+			cm, err = crypt.NewEtcdConfigManager([]string{rp.Endpoint()}, kr)
+		} else {
+			cm, err = crypt.NewConsulConfigManager([]string{rp.Endpoint()}, kr)
+		}
+	} else {
+		if rp.Provider() == "etcd" {
+			cm, err = crypt.NewStandardEtcdConfigManager([]string{rp.Endpoint()})
+		} else {
+			cm, err = crypt.NewStandardConsulConfigManager([]string{rp.Endpoint()})
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return cm, nil
+}
+
+func init() {
+	viper.RemoteConfig = &remoteConfigProvider{}
+}
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
new file mode 100644
index 000000000..952cad44c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/util.go
@@ -0,0 +1,221 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"github.com/spf13/afero"
+	"github.com/spf13/cast"
+	jww "github.com/spf13/jwalterweatherman"
+)
+
+// ConfigParseError denotes failing to parse configuration file.
+type ConfigParseError struct {
+	err error
+}
+
+// Error returns the formatted configuration error.
+func (pe ConfigParseError) Error() string {
+	return fmt.Sprintf("While parsing config: %s", pe.err.Error())
+}
+
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, create a copy and lower-case the keys recursively.
+func toCaseInsensitiveValue(value interface{}) interface{} { + switch v := value.(type) { + case map[interface{}]interface{}: + value = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + value = copyAndInsensitiviseMap(v) + } + + return value +} + +// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of +// any map it makes case insensitive. +func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { + nm := make(map[string]interface{}) + + for key, val := range m { + lkey := strings.ToLower(key) + switch v := val.(type) { + case map[interface{}]interface{}: + nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + nm[lkey] = copyAndInsensitiviseMap(v) + default: + nm[lkey] = v + } + } + + return nm +} + +func insensitiviseMap(m map[string]interface{}) { + for key, val := range m { + switch val.(type) { + case map[interface{}]interface{}: + // nested map: cast and recursively insensitivise + val = cast.ToStringMap(val) + insensitiviseMap(val.(map[string]interface{})) + case map[string]interface{}: + // nested map: recursively insensitivise + insensitiviseMap(val.(map[string]interface{})) + } + + lower := strings.ToLower(key) + if key != lower { + // remove old key (not lower-cased) + delete(m, key) + } + // update map + m[lower] = val + } +} + +func absPathify(inPath string) string { + jww.INFO.Println("Trying to resolve absolute path to", inPath) + + if strings.HasPrefix(inPath, "$HOME") { + inPath = userHomeDir() + inPath[5:] + } + + if strings.HasPrefix(inPath, "$") { + end := strings.Index(inPath, string(os.PathSeparator)) + inPath = os.Getenv(inPath[1:end]) + inPath[end:] + } + + if filepath.IsAbs(inPath) { + return filepath.Clean(inPath) + } + + p, err := filepath.Abs(inPath) + if err == nil { + return filepath.Clean(p) + } + + jww.ERROR.Println("Couldn't discover absolute path") + jww.ERROR.Println(err) + return "" +} + +// Check if File / Directory Exists +func exists(fs afero.Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +func userHomeDir() string { + if runtime.GOOS == "windows" { + home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + if home == "" { + home = os.Getenv("USERPROFILE") + } + return home + } + return os.Getenv("HOME") +} + +func safeMul(a, b uint) uint { + c := a * b + if a > 1 && b > 1 && c/b != a { + return 0 + } + return c +} + +// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes +func parseSizeInBytes(sizeStr string) uint { + sizeStr = strings.TrimSpace(sizeStr) + lastChar := len(sizeStr) - 1 + multiplier := uint(1) + + if lastChar > 0 { + if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' { + if lastChar > 1 { + switch unicode.ToLower(rune(sizeStr[lastChar-1])) { + case 'k': + multiplier = 1 << 10 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + case 'm': + multiplier = 1 << 20 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + case 'g': + multiplier = 1 << 30 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + default: + multiplier = 1 + sizeStr = strings.TrimSpace(sizeStr[:lastChar]) + } + } + } + } + + size := cast.ToInt(sizeStr) + if size < 0 { + size = 0 + } + + return safeMul(uint(size), multiplier) +} + +// deepSearch scans deep 
maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+	for _, k := range path {
+		m2, ok := m[k]
+		if !ok {
+			// intermediate key does not exist
+			// => create it and continue from there
+			m3 := make(map[string]interface{})
+			m[k] = m3
+			m = m3
+			continue
+		}
+		m3, ok := m2.(map[string]interface{})
+		if !ok {
+			// intermediate key is a value
+			// => replace with a new map
+			m3 = make(map[string]interface{})
+			m[k] = m3
+		}
+		// continue search from here
+		m = m3
+	}
+	return m
+}
diff --git a/vendor/github.com/spf13/viper/util_test.go b/vendor/github.com/spf13/viper/util_test.go
new file mode 100644
index 000000000..0af80bb63
--- /dev/null
+++ b/vendor/github.com/spf13/viper/util_test.go
@@ -0,0 +1,54 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestCopyAndInsensitiviseMap(t *testing.T) {
+	var (
+		given = map[string]interface{}{
+			"Foo": 32,
+			"Bar": map[interface{}]interface {
+			}{
+				"ABc": "A",
+				"cDE": "B"},
+		}
+		expected = map[string]interface{}{
+			"foo": 32,
+			"bar": map[string]interface {
+			}{
+				"abc": "A",
+				"cde": "B"},
+		}
+	)
+
+	got := copyAndInsensitiviseMap(given)
+
+	if !reflect.DeepEqual(got, expected) {
+		t.Fatalf("Got %q\nexpected\n%q", got, expected)
+	}
+
+	if _, ok := given["foo"]; ok {
+		t.Fatal("Input map changed")
+	}
+
+	if _, ok := given["bar"]; ok {
+		t.Fatal("Input map changed")
+	}
+
+	m := given["Bar"].(map[interface{}]interface{})
+	if _, ok := m["ABc"]; !ok {
+		t.Fatal("Input map changed")
+	}
+}
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
new file mode 100644
index 000000000..e9966ba58
--- /dev/null
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -0,0 +1,1771 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+ +// Each item takes precedence over the item below it: + +// overrides +// flag +// env +// config +// key/value store +// default + +package viper + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "fmt" + "io" + "log" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + yaml "gopkg.in/yaml.v2" + + "github.com/fsnotify/fsnotify" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" + "github.com/magiconair/properties" + "github.com/mitchellh/mapstructure" + toml "github.com/pelletier/go-toml" + "github.com/spf13/afero" + "github.com/spf13/cast" + jww "github.com/spf13/jwalterweatherman" + "github.com/spf13/pflag" +) + +// ConfigMarshalError happens when failing to marshal the configuration. +type ConfigMarshalError struct { + err error +} + +// Error returns the formatted configuration error. +func (e ConfigMarshalError) Error() string { + return fmt.Sprintf("While marshaling config: %s", e.err.Error()) +} + +var v *Viper + +type RemoteResponse struct { + Value []byte + Error error +} + +func init() { + v = New() +} + +type remoteConfigFactory interface { + Get(rp RemoteProvider) (io.Reader, error) + Watch(rp RemoteProvider) (io.Reader, error) + WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) +} + +// RemoteConfig is optional, see the remote package +var RemoteConfig remoteConfigFactory + +// UnsupportedConfigError denotes encountering an unsupported +// configuration filetype. +type UnsupportedConfigError string + +// Error returns the formatted configuration error. +func (str UnsupportedConfigError) Error() string { + return fmt.Sprintf("Unsupported Config Type %q", string(str)) +} + +// UnsupportedRemoteProviderError denotes encountering an unsupported remote +// provider. Currently only etcd and Consul are supported. +type UnsupportedRemoteProviderError string + +// Error returns the formatted remote provider error. +func (str UnsupportedRemoteProviderError) Error() string { + return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) +} + +// RemoteConfigError denotes encountering an error while trying to +// pull the configuration from the remote provider. +type RemoteConfigError string + +// Error returns the formatted remote provider error +func (rce RemoteConfigError) Error() string { + return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) +} + +// ConfigFileNotFoundError denotes failing to find configuration file. +type ConfigFileNotFoundError struct { + name, locations string +} + +// Error returns the formatted configuration error. +func (fnfe ConfigFileNotFoundError) Error() string { + return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) +} + +// Viper is a prioritized configuration registry. It +// maintains a set of configuration sources, fetches +// values to populate those, and provides them according +// to the source's priority. +// The priority of the sources is the following: +// 1. overrides +// 2. flags +// 3. env. variables +// 4. config file +// 5. key/value store +// 6. 
defaults +// +// For example, if values from the following sources were loaded: +// +// Defaults : { +// "secret": "", +// "user": "default", +// "endpoint": "https://localhost" +// } +// Config : { +// "user": "root" +// "secret": "defaultsecret" +// } +// Env : { +// "secret": "somesecretkey" +// } +// +// The resulting config will have the following values: +// +// { +// "secret": "somesecretkey", +// "user": "root", +// "endpoint": "https://localhost" +// } +type Viper struct { + // Delimiter that separates a list of keys + // used to access a nested value in one go + keyDelim string + + // A set of paths to look for the config file in + configPaths []string + + // The filesystem to read config from. + fs afero.Fs + + // A set of remote providers to search for the configuration + remoteProviders []*defaultRemoteProvider + + // Name of file to look for inside the path + configName string + configFile string + configType string + envPrefix string + + automaticEnvApplied bool + envKeyReplacer *strings.Replacer + + config map[string]interface{} + override map[string]interface{} + defaults map[string]interface{} + kvstore map[string]interface{} + pflags map[string]FlagValue + env map[string]string + aliases map[string]string + typeByDefValue bool + + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + properties *properties.Properties + + onConfigChange func(fsnotify.Event) +} + +// New returns an initialized Viper instance. +func New() *Viper { + v := new(Viper) + v.keyDelim = "." + v.configName = "config" + v.fs = afero.NewOsFs() + v.config = make(map[string]interface{}) + v.override = make(map[string]interface{}) + v.defaults = make(map[string]interface{}) + v.kvstore = make(map[string]interface{}) + v.pflags = make(map[string]FlagValue) + v.env = make(map[string]string) + v.aliases = make(map[string]string) + v.typeByDefValue = false + + return v +} + +// Intended for testing, will reset all to default settings. +// In the public interface for the viper package so applications +// can use it in their testing as well. +func Reset() { + v = New() + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} + SupportedRemoteProviders = []string{"etcd", "consul"} +} + +type defaultRemoteProvider struct { + provider string + endpoint string + path string + secretKeyring string +} + +func (rp defaultRemoteProvider) Provider() string { + return rp.provider +} + +func (rp defaultRemoteProvider) Endpoint() string { + return rp.endpoint +} + +func (rp defaultRemoteProvider) Path() string { + return rp.path +} + +func (rp defaultRemoteProvider) SecretKeyring() string { + return rp.secretKeyring +} + +// RemoteProvider stores the configuration necessary +// to connect to a remote key/value store. +// Optional secretKeyring to unencrypt encrypted values +// can be provided. +type RemoteProvider interface { + Provider() string + Endpoint() string + Path() string + SecretKeyring() string +} + +// SupportedExts are universally supported extensions. +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} + +// SupportedRemoteProviders are universally supported remote providers. 
+var SupportedRemoteProviders = []string{"etcd", "consul"} + +func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } +func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { + v.onConfigChange = run +} + +func WatchConfig() { v.WatchConfig() } +func (v *Viper) WatchConfig() { + go func() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way + filename, err := v.getConfigFile() + if err != nil { + log.Println("error:", err) + return + } + + configFile := filepath.Clean(filename) + configDir, _ := filepath.Split(configFile) + + done := make(chan bool) + go func() { + for { + select { + case event := <-watcher.Events: + // we only care about the config file + if filepath.Clean(event.Name) == configFile { + if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create { + err := v.ReadInConfig() + if err != nil { + log.Println("error:", err) + } + v.onConfigChange(event) + } + } + case err := <-watcher.Errors: + log.Println("error:", err) + } + } + }() + + watcher.Add(configDir) + <-done + }() +} + +// SetConfigFile explicitly defines the path, name and extension of the config file. +// Viper will use this and not check any of the config paths. +func SetConfigFile(in string) { v.SetConfigFile(in) } +func (v *Viper) SetConfigFile(in string) { + if in != "" { + v.configFile = in + } +} + +// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. +// E.g. if your prefix is "spf", the env registry will look for env +// variables that start with "SPF_". +func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } +func (v *Viper) SetEnvPrefix(in string) { + if in != "" { + v.envPrefix = in + } +} + +func (v *Viper) mergeWithEnvPrefix(in string) string { + if v.envPrefix != "" { + return strings.ToUpper(v.envPrefix + "_" + in) + } + + return strings.ToUpper(in) +} + +// TODO: should getEnv logic be moved into find(). Can generalize the use of +// rewriting keys many things, Ex: Get('someKey') -> some_key +// (camel case to snake case for JSON keys perhaps) + +// getEnv is a wrapper around os.Getenv which replaces characters in the original +// key. This allows env vars which have different keys than the config object +// keys. +func (v *Viper) getEnv(key string) string { + if v.envKeyReplacer != nil { + key = v.envKeyReplacer.Replace(key) + } + return os.Getenv(key) +} + +// ConfigFileUsed returns the file used to populate the config registry. +func ConfigFileUsed() string { return v.ConfigFileUsed() } +func (v *Viper) ConfigFileUsed() string { return v.configFile } + +// AddConfigPath adds a path for Viper to search for the config file in. +// Can be called multiple times to define multiple search paths. +func AddConfigPath(in string) { v.AddConfigPath(in) } +func (v *Viper) AddConfigPath(in string) { + if in != "" { + absin := absPathify(in) + jww.INFO.Println("adding", absin, "to paths to search") + if !stringInSlice(absin, v.configPaths) { + v.configPaths = append(v.configPaths, absin) + } + } +} + +// AddRemoteProvider adds a remote configuration source. +// Remote Providers are searched in the order they are added. +// provider is a string value, "etcd" or "consul" are currently supported. +// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp" +func AddRemoteProvider(provider, endpoint, path string) error { + return v.AddRemoteProvider(provider, endpoint, path) +} +func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { + if !stringInSlice(provider, SupportedRemoteProviders) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +// AddSecureRemoteProvider adds a remote configuration source. +// Secure Remote Providers are searched in the order they are added. +// provider is a string value, "etcd" or "consul" are currently supported. +// endpoint is the url. etcd requires http://ip:port consul requires ip:port +// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp" +// Secure Remote Providers are implemented with github.com/xordataexchange/crypt +func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) +} + +func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + if !stringInSlice(provider, SupportedRemoteProviders) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + secretKeyring: secretkeyring, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { + for _, y := range v.remoteProviders { + if reflect.DeepEqual(y, p) { + return true + } + } + return false +} + +// searchMap recursively searches for a value for path in source map. +// Returns nil if not found. +// Note: This assumes that the path entries and map keys are lower cased. +func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { + if len(path) == 0 { + return source + } + + next, ok := source[path[0]] + if ok { + // Fast path + if len(path) == 1 { + return next + } + + // Nested case + switch next.(type) { + case map[interface{}]interface{}: + return v.searchMap(cast.ToStringMap(next), path[1:]) + case map[string]interface{}: + // Type assertion is safe here since it is only reached + // if the type of `next` is the same as the type being asserted + return v.searchMap(next.(map[string]interface{}), path[1:]) + default: + // got a value but nested key expected, return "nil" for not found + return nil + } + } + return nil +} + +// searchMapWithPathPrefixes recursively searches for a value for path in source map. 
+// +// While searchMap() considers each path element as a single map key, this +// function searches for, and prioritizes, merged path elements. +// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" +// is also defined, this latter value is returned for path ["foo", "bar"]. +// +// This should be useful only at config level (other maps may not contain dots +// in their keys). +// +// Note: This assumes that the path entries and map keys are lower cased. +func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { + if len(path) == 0 { + return source + } + + // search for path prefixes, starting from the longest one + for i := len(path); i > 0; i-- { + prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) + + next, ok := source[prefixKey] + if ok { + // Fast path + if i == len(path) { + return next + } + + // Nested case + var val interface{} + switch next.(type) { + case map[interface{}]interface{}: + val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) + case map[string]interface{}: + // Type assertion is safe here since it is only reached + // if the type of `next` is the same as the type being asserted + val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + if val != nil { + return val + } + } + } + + // not found + return nil +} + +// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere +// on its path in the map. +// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { + var parentVal interface{} + for i := 1; i < len(path); i++ { + parentVal = v.searchMap(m, path[0:i]) + if parentVal == nil { + // not found, no need to add more path elements + return "" + } + switch parentVal.(type) { + case map[interface{}]interface{}: + continue + case map[string]interface{}: + continue + default: + // parentVal is a regular value which shadows "path" + return strings.Join(path[0:i], v.keyDelim) + } + } + return "" +} + +// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere +// in a sub-path of the map. +// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { + // unify input map + var m map[string]interface{} + switch mi.(type) { + case map[string]string, map[string]FlagValue: + m = cast.ToStringMap(mi) + default: + return "" + } + + // scan paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if _, ok := m[parentKey]; ok { + return parentKey + } + } + return "" +} + +// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere +// in the environment, when automatic env is on. 
+// e.g., if "foo.bar" has a value in the environment, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInAutoEnv(path []string) string { + var parentKey string + var val string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if val = v.getEnv(v.mergeWithEnvPrefix(parentKey)); val != "" { + return parentKey + } + } + return "" +} + +// SetTypeByDefaultValue enables or disables the inference of a key value's +// type when the Get function is used based upon a key's default value as +// opposed to the value returned based on the normal fetch logic. +// +// For example, if a key has a default value of []string{} and the same key +// is set via an environment variable to "a b c", a call to the Get function +// would return a string slice for the key if the key's type is inferred by +// the default value and the Get function would return: +// +// []string {"a", "b", "c"} +// +// Otherwise the Get function would return: +// +// "a b c" +func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } +func (v *Viper) SetTypeByDefaultValue(enable bool) { + v.typeByDefValue = enable +} + +// GetViper gets the global Viper instance. +func GetViper() *Viper { + return v +} + +// Get can retrieve any value given the key to use. +// Get is case-insensitive for a key. +// Get has the behavior of returning the value associated with the first +// place from where it is set. Viper will check in the following order: +// override, flag, env, config file, key/value store, default +// +// Get returns an interface. For a specific value use one of the Get____ methods. +func Get(key string) interface{} { return v.Get(key) } +func (v *Viper) Get(key string) interface{} { + lcaseKey := strings.ToLower(key) + val := v.find(lcaseKey) + if val == nil { + return nil + } + + if v.typeByDefValue { + // TODO(bep) this branch isn't covered by a single test. + valType := val + path := strings.Split(lcaseKey, v.keyDelim) + defVal := v.searchMap(v.defaults, path) + if defVal != nil { + valType = defVal + } + + switch valType.(type) { + case bool: + return cast.ToBool(val) + case string: + return cast.ToString(val) + case int64, int32, int16, int8, int: + return cast.ToInt(val) + case float64, float32: + return cast.ToFloat64(val) + case time.Time: + return cast.ToTime(val) + case time.Duration: + return cast.ToDuration(val) + case []string: + return cast.ToStringSlice(val) + } + } + + return val +} + +// Sub returns new Viper instance representing a sub tree of this instance. +// Sub is case-insensitive for a key. +func Sub(key string) *Viper { return v.Sub(key) } +func (v *Viper) Sub(key string) *Viper { + subv := New() + data := v.Get(key) + if data == nil { + return nil + } + + if reflect.TypeOf(data).Kind() == reflect.Map { + subv.config = cast.ToStringMap(data) + return subv + } + return nil +} + +// GetString returns the value associated with the key as a string. +func GetString(key string) string { return v.GetString(key) } +func (v *Viper) GetString(key string) string { + return cast.ToString(v.Get(key)) +} + +// GetBool returns the value associated with the key as a boolean. +func GetBool(key string) bool { return v.GetBool(key) } +func (v *Viper) GetBool(key string) bool { + return cast.ToBool(v.Get(key)) +} + +// GetInt returns the value associated with the key as an integer. 
+func GetInt(key string) int { return v.GetInt(key) } +func (v *Viper) GetInt(key string) int { + return cast.ToInt(v.Get(key)) +} + +// GetInt64 returns the value associated with the key as an integer. +func GetInt64(key string) int64 { return v.GetInt64(key) } +func (v *Viper) GetInt64(key string) int64 { + return cast.ToInt64(v.Get(key)) +} + +// GetFloat64 returns the value associated with the key as a float64. +func GetFloat64(key string) float64 { return v.GetFloat64(key) } +func (v *Viper) GetFloat64(key string) float64 { + return cast.ToFloat64(v.Get(key)) +} + +// GetTime returns the value associated with the key as time. +func GetTime(key string) time.Time { return v.GetTime(key) } +func (v *Viper) GetTime(key string) time.Time { + return cast.ToTime(v.Get(key)) +} + +// GetDuration returns the value associated with the key as a duration. +func GetDuration(key string) time.Duration { return v.GetDuration(key) } +func (v *Viper) GetDuration(key string) time.Duration { + return cast.ToDuration(v.Get(key)) +} + +// GetStringSlice returns the value associated with the key as a slice of strings. +func GetStringSlice(key string) []string { return v.GetStringSlice(key) } +func (v *Viper) GetStringSlice(key string) []string { + return cast.ToStringSlice(v.Get(key)) +} + +// GetStringMap returns the value associated with the key as a map of interfaces. +func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } +func (v *Viper) GetStringMap(key string) map[string]interface{} { + return cast.ToStringMap(v.Get(key)) +} + +// GetStringMapString returns the value associated with the key as a map of strings. +func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } +func (v *Viper) GetStringMapString(key string) map[string]string { + return cast.ToStringMapString(v.Get(key)) +} + +// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. +func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } +func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { + return cast.ToStringMapStringSlice(v.Get(key)) +} + +// GetSizeInBytes returns the size of the value associated with the given key +// in bytes. +func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } +func (v *Viper) GetSizeInBytes(key string) uint { + sizeStr := cast.ToString(v.Get(key)) + return parseSizeInBytes(sizeStr) +} + +// UnmarshalKey takes a single key and unmarshals it into a Struct. +func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) } +func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error { + err := decode(v.Get(key), defaultDecoderConfig(rawVal)) + + if err != nil { + return err + } + + v.insensitiviseMaps() + + return nil +} + +// Unmarshal unmarshals the config into a Struct. Make sure that the tags +// on the fields of the structure are properly set. 
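+//
+// A minimal usage sketch (struct and field names are assumed for
+// illustration):
+//
+//    type serverConfig struct {
+//        Port int    `mapstructure:"port"`
+//        Name string `mapstructure:"name"`
+//    }
+//    var c serverConfig
+//    if err := viper.Unmarshal(&c); err != nil {
+//        // handle the decode error
+//    }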
+func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) }
+func (v *Viper) Unmarshal(rawVal interface{}) error {
+    err := decode(v.AllSettings(), defaultDecoderConfig(rawVal))
+
+    if err != nil {
+        return err
+    }
+
+    v.insensitiviseMaps()
+
+    return nil
+}
+
+// defaultDecoderConfig returns default mapstructure.DecoderConfig with support
+// of time.Duration values & string slices
+func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig {
+    return &mapstructure.DecoderConfig{
+        Metadata:         nil,
+        Result:           output,
+        WeaklyTypedInput: true,
+        DecodeHook: mapstructure.ComposeDecodeHookFunc(
+            mapstructure.StringToTimeDurationHookFunc(),
+            mapstructure.StringToSliceHookFunc(","),
+        ),
+    }
+}
+
+// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
+func decode(input interface{}, config *mapstructure.DecoderConfig) error {
+    decoder, err := mapstructure.NewDecoder(config)
+    if err != nil {
+        return err
+    }
+    return decoder.Decode(input)
+}
+
+// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
+// in the destination struct.
+func (v *Viper) UnmarshalExact(rawVal interface{}) error {
+    config := defaultDecoderConfig(rawVal)
+    config.ErrorUnused = true
+
+    err := decode(v.AllSettings(), config)
+
+    if err != nil {
+        return err
+    }
+
+    v.insensitiviseMaps()
+
+    return nil
+}
+
+// BindPFlags binds a full flag set to the configuration, using each flag's long
+// name as the config key.
+func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
+func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
+    return v.BindFlagValues(pflagValueSet{flags})
+}
+
+// BindPFlag binds a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
+//
+//    serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+//    Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+//
+func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
+func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
+    return v.BindFlagValue(key, pflagValue{flag})
+}
+
+// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
+// name as the config key.
+func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
+func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
+    flags.VisitAll(func(flag FlagValue) {
+        if err = v.BindFlagValue(flag.Name(), flag); err != nil {
+            return
+        }
+    })
+    return nil
+}
+
+// BindFlagValue binds a specific key to a FlagValue.
+// Example (where serverCmd is a Cobra instance):
+//
+//    serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+//    Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
+//
+func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
+func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
+    if flag == nil {
+        return fmt.Errorf("flag for %q is nil", key)
+    }
+    v.pflags[strings.ToLower(key)] = flag
+    return nil
+}
+
+// BindEnv binds a Viper key to an ENV variable.
+// ENV variables are case sensitive.
+// If only a key is provided, it will use the env key matching the key, uppercased.
+// If the EnvPrefix is set, it will be used when the env name is not provided.
+func BindEnv(input ...string) error { return v.BindEnv(input...)
} +func (v *Viper) BindEnv(input ...string) error { + var key, envkey string + if len(input) == 0 { + return fmt.Errorf("BindEnv missing key to bind to") + } + + key = strings.ToLower(input[0]) + + if len(input) == 1 { + envkey = v.mergeWithEnvPrefix(key) + } else { + envkey = input[1] + } + + v.env[key] = envkey + + return nil +} + +// Given a key, find the value. +// Viper will check in the following order: +// flag, env, config file, key/value store, default. +// Viper will check to see if an alias exists first. +// Note: this assumes a lower-cased key given. +func (v *Viper) find(lcaseKey string) interface{} { + + var ( + val interface{} + exists bool + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + ) + + // compute the path through the nested maps to the nested value + if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { + return nil + } + + // if the requested key is an alias, then return the proper key + lcaseKey = v.realKey(lcaseKey) + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + + // Set() override first + val = v.searchMap(v.override, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { + return nil + } + + // PFlag override next + flag, exists := v.pflags[lcaseKey] + if exists && flag.HasChanged() { + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + return cast.ToInt(flag.ValueString()) + case "bool": + return cast.ToBool(flag.ValueString()) + case "stringSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res + default: + return flag.ValueString() + } + } + if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { + return nil + } + + // Env override next + if v.automaticEnvApplied { + // even if it hasn't been registered, if automaticEnv is used, + // check any Get request + if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" { + return val + } + if nested && v.isPathShadowedInAutoEnv(path) != "" { + return nil + } + } + envkey, exists := v.env[lcaseKey] + if exists { + if val = v.getEnv(envkey); val != "" { + return val + } + } + if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { + return nil + } + + // Config file next + val = v.searchMapWithPathPrefixes(v.config, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { + return nil + } + + // K/V store next + val = v.searchMap(v.kvstore, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { + return nil + } + + // Default next + val = v.searchMap(v.defaults, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { + return nil + } + + // last chance: if no other value is returned and a flag does exist for the value, + // get the flag's value even if the flag's value has not changed + if flag, exists := v.pflags[lcaseKey]; exists { + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + return cast.ToInt(flag.ValueString()) + case "bool": + return cast.ToBool(flag.ValueString()) + case "stringSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res + default: + return flag.ValueString() + } + } + // last item, no need to check shadowing + + return nil +} + +func readAsCSV(val string) ([]string, error) { + if val == "" 
{
+        return []string{}, nil
+    }
+    stringReader := strings.NewReader(val)
+    csvReader := csv.NewReader(stringReader)
+    return csvReader.Read()
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+func IsSet(key string) bool { return v.IsSet(key) }
+func (v *Viper) IsSet(key string) bool {
+    lcaseKey := strings.ToLower(key)
+    val := v.find(lcaseKey)
+    return val != nil
+}
+
+// AutomaticEnv has Viper check ENV variables for all
+// keys set in config, default & flags.
+func AutomaticEnv() { v.AutomaticEnv() }
+func (v *Viper) AutomaticEnv() {
+    v.automaticEnvApplied = true
+}
+
+// SetEnvKeyReplacer sets the strings.Replacer on the viper object.
+// Useful for mapping an environmental variable to a key that does
+// not match it.
+func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
+func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
+    v.envKeyReplacer = r
+}
+
+// Aliases provide another accessor for the same key.
+// This enables one to change a name without breaking the application.
+func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
+func (v *Viper) RegisterAlias(alias string, key string) {
+    v.registerAlias(alias, strings.ToLower(key))
+}
+
+func (v *Viper) registerAlias(alias string, key string) {
+    alias = strings.ToLower(alias)
+    if alias != key && alias != v.realKey(key) {
+        _, exists := v.aliases[alias]
+
+        if !exists {
+            // if we alias something that exists in one of the maps to another
+            // name, we'll never be able to get that value using the original
+            // name, so move the config value to the new realkey.
+            if val, ok := v.config[alias]; ok {
+                delete(v.config, alias)
+                v.config[key] = val
+            }
+            if val, ok := v.kvstore[alias]; ok {
+                delete(v.kvstore, alias)
+                v.kvstore[key] = val
+            }
+            if val, ok := v.defaults[alias]; ok {
+                delete(v.defaults, alias)
+                v.defaults[key] = val
+            }
+            if val, ok := v.override[alias]; ok {
+                delete(v.override, alias)
+                v.override[key] = val
+            }
+            v.aliases[alias] = key
+        }
+    } else {
+        jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
+    }
+}
+
+func (v *Viper) realKey(key string) string {
+    newkey, exists := v.aliases[key]
+    if exists {
+        jww.DEBUG.Println("Alias", key, "to", newkey)
+        return v.realKey(newkey)
+    }
+    return key
+}
+
+// InConfig checks to see if the given key (or an alias) is in the config file.
+func InConfig(key string) bool { return v.InConfig(key) }
+func (v *Viper) InConfig(key string) bool {
+    // if the requested key is an alias, then return the proper key
+    key = v.realKey(key)
+
+    _, exists := v.config[key]
+    return exists
+}
+
+// SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
+// Default only used when no value is provided by the user via flag, config or ENV.
+func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
+func (v *Viper) SetDefault(key string, value interface{}) {
+    // If alias passed in, then set the proper default
+    key = v.realKey(strings.ToLower(key))
+    value = toCaseInsensitiveValue(value)
+
+    path := strings.Split(key, v.keyDelim)
+    lastKey := strings.ToLower(path[len(path)-1])
+    deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
+
+    // set innermost value
+    deepestMap[lastKey] = value
+}
+
+// Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
+// Will be used instead of values obtained via +// flags, config file, ENV, default, or key/value store. +func Set(key string, value interface{}) { v.Set(key, value) } +func (v *Viper) Set(key string, value interface{}) { + // If alias passed in, then set the proper override + key = v.realKey(strings.ToLower(key)) + value = toCaseInsensitiveValue(value) + + path := strings.Split(key, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v.override, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value +} + +// ReadInConfig will discover and load the configuration file from disk +// and key/value stores, searching in one of the defined paths. +func ReadInConfig() error { return v.ReadInConfig() } +func (v *Viper) ReadInConfig() error { + jww.INFO.Println("Attempting to read in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + + if !stringInSlice(v.getConfigType(), SupportedExts) { + return UnsupportedConfigError(v.getConfigType()) + } + + jww.DEBUG.Println("Reading file: ", filename) + file, err := afero.ReadFile(v.fs, filename) + if err != nil { + return err + } + + config := make(map[string]interface{}) + + err = v.unmarshalReader(bytes.NewReader(file), config) + if err != nil { + return err + } + + v.config = config + return nil +} + +// MergeInConfig merges a new configuration with an existing config. +func MergeInConfig() error { return v.MergeInConfig() } +func (v *Viper) MergeInConfig() error { + jww.INFO.Println("Attempting to merge in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + + if !stringInSlice(v.getConfigType(), SupportedExts) { + return UnsupportedConfigError(v.getConfigType()) + } + + file, err := afero.ReadFile(v.fs, filename) + if err != nil { + return err + } + + return v.MergeConfig(bytes.NewReader(file)) +} + +// ReadConfig will read a configuration file, setting existing keys to nil if the +// key does not exist in the file. +func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } +func (v *Viper) ReadConfig(in io.Reader) error { + v.config = make(map[string]interface{}) + return v.unmarshalReader(in, v.config) +} + +// MergeConfig merges a new configuration with an existing config. +func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } +func (v *Viper) MergeConfig(in io.Reader) error { + if v.config == nil { + v.config = make(map[string]interface{}) + } + cfg := make(map[string]interface{}) + if err := v.unmarshalReader(in, cfg); err != nil { + return err + } + mergeMaps(cfg, v.config, nil) + return nil +} + +// WriteConfig writes the current configuration to a file. +func WriteConfig() error { return v.WriteConfig() } +func (v *Viper) WriteConfig() error { + filename, err := v.getConfigFile() + if err != nil { + return err + } + return v.writeConfig(filename, true) +} + +// SafeWriteConfig writes current configuration to file only if the file does not exist. +func SafeWriteConfig() error { return v.SafeWriteConfig() } +func (v *Viper) SafeWriteConfig() error { + filename, err := v.getConfigFile() + if err != nil { + return err + } + return v.writeConfig(filename, false) +} + +// WriteConfigAs writes current configuration to a given filename. +func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } +func (v *Viper) WriteConfigAs(filename string) error { + return v.writeConfig(filename, true) +} + +// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
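+//
+// A minimal usage sketch (file names and paths are assumed for
+// illustration):
+//
+//    viper.SetConfigName("config")
+//    viper.AddConfigPath("/etc/myapp/")
+//    if err := viper.ReadInConfig(); err != nil {
+//        // handle a missing or unparsable config file
+//    }
+//    // unlike WriteConfigAs, this refuses to clobber an existing file
+//    if err := viper.SafeWriteConfigAs("/tmp/config-backup.yaml"); err != nil {
+//        // the target already exists or could not be written
+//    }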
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
+func (v *Viper) SafeWriteConfigAs(filename string) error {
+    return v.writeConfig(filename, false)
+}
+
+func writeConfig(filename string, force bool) error { return v.writeConfig(filename, force) }
+func (v *Viper) writeConfig(filename string, force bool) error {
+    jww.INFO.Println("Attempting to write configuration to file.")
+    ext := filepath.Ext(filename)
+    if len(ext) <= 1 {
+        return fmt.Errorf("Filename: %s requires valid extension.", filename)
+    }
+    configType := ext[1:]
+    if !stringInSlice(configType, SupportedExts) {
+        return UnsupportedConfigError(configType)
+    }
+    if v.config == nil {
+        v.config = make(map[string]interface{})
+    }
+    var flags int
+    if force {
+        flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY
+    } else {
+        if _, err := os.Stat(filename); os.IsNotExist(err) {
+            // O_CREATE is required here: the stat above showed the file does
+            // not exist yet, and O_WRONLY alone cannot create it.
+            flags = os.O_CREATE | os.O_WRONLY
+        } else {
+            return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename)
+        }
+    }
+    f, err := v.fs.OpenFile(filename, flags, os.FileMode(0644))
+    if err != nil {
+        return err
+    }
+    return v.marshalWriter(f, configType)
+}
+
+// Unmarshal a Reader into a map.
+// Should probably be an unexported function.
+func unmarshalReader(in io.Reader, c map[string]interface{}) error {
+    return v.unmarshalReader(in, c)
+}
+func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
+    buf := new(bytes.Buffer)
+    buf.ReadFrom(in)
+
+    switch strings.ToLower(v.getConfigType()) {
+    case "yaml", "yml":
+        if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
+            return ConfigParseError{err}
+        }
+
+    case "json":
+        if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
+            return ConfigParseError{err}
+        }
+
+    case "hcl":
+        obj, err := hcl.Parse(string(buf.Bytes()))
+        if err != nil {
+            return ConfigParseError{err}
+        }
+        if err = hcl.DecodeObject(&c, obj); err != nil {
+            return ConfigParseError{err}
+        }
+
+    case "toml":
+        tree, err := toml.LoadReader(buf)
+        if err != nil {
+            return ConfigParseError{err}
+        }
+        tmap := tree.ToMap()
+        for k, v := range tmap {
+            c[k] = v
+        }
+
+    case "properties", "props", "prop":
+        v.properties = properties.NewProperties()
+        var err error
+        if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
+            return ConfigParseError{err}
+        }
+        for _, key := range v.properties.Keys() {
+            value, _ := v.properties.Get(key)
+            // recursively build nested maps
+            path := strings.Split(key, ".")
+            lastKey := strings.ToLower(path[len(path)-1])
+            deepestMap := deepSearch(c, path[0:len(path)-1])
+            // set innermost value
+            deepestMap[lastKey] = value
+        }
+    }
+
+    insensitiviseMap(c)
+    return nil
+}
+
+// Marshal a map into Writer.
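+// The concrete encoder is chosen from the configType string, which
+// writeConfig above derives from the target filename's extension and
+// validates against SupportedExts before this function is reached.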
+func marshalWriter(f afero.File, configType string) error {
+    return v.marshalWriter(f, configType)
+}
+func (v *Viper) marshalWriter(f afero.File, configType string) error {
+    c := v.AllSettings()
+    switch configType {
+    case "json":
+        b, err := json.MarshalIndent(c, "", "  ")
+        if err != nil {
+            return ConfigMarshalError{err}
+        }
+        _, err = f.WriteString(string(b))
+        if err != nil {
+            return ConfigMarshalError{err}
+        }
+
+    case "hcl":
+        b, err := json.Marshal(c)
+        if err != nil {
+            // the JSON round-trip can fail; check before handing the bytes to hcl.Parse
+            return ConfigMarshalError{err}
+        }
+        ast, err := hcl.Parse(string(b))
+        if err != nil {
+            return ConfigMarshalError{err}
+        }
+        err = printer.Fprint(f, ast.Node)
+        if err != nil {
+            return ConfigMarshalError{err}
+        }
+
+    case "prop", "props", "properties":
+        if v.properties == nil {
+            v.properties = properties.NewProperties()
+        }
+        p := v.properties
+        for _, key := range v.AllKeys() {
+            _, _, err := p.Set(key, v.GetString(key))
+            if err != nil {
+                return ConfigMarshalError{err}
+            }
+        }
+        _, err := p.WriteComment(f, "#", properties.UTF8)
+        if err != nil {
+            return ConfigMarshalError{err}
+        }
+
+    case "toml":
+        t, err := toml.TreeFromMap(c)
+        if err != nil {
+            return ConfigMarshalError{err}
+        }
+        s := t.String()
+        if _, err := f.WriteString(s); err != nil {
+            return ConfigMarshalError{err}
+        }
+
+    case "yaml", "yml":
+        b, err := yaml.Marshal(c)
+        if err != nil {
+            return ConfigMarshalError{err}
+        }
+        if _, err = f.WriteString(string(b)); err != nil {
+            return ConfigMarshalError{err}
+        }
+    }
+    return nil
+}
+
+func keyExists(k string, m map[string]interface{}) string {
+    lk := strings.ToLower(k)
+    for mk := range m {
+        lmk := strings.ToLower(mk)
+        if lmk == lk {
+            return mk
+        }
+    }
+    return ""
+}
+
+func castToMapStringInterface(
+    src map[interface{}]interface{}) map[string]interface{} {
+    tgt := map[string]interface{}{}
+    for k, v := range src {
+        tgt[fmt.Sprintf("%v", k)] = v
+    }
+    return tgt
+}
+
+func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
+    tgt := map[string]interface{}{}
+    for k, v := range src {
+        tgt[k] = v
+    }
+    return tgt
+}
+
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+    tgt := map[string]interface{}{}
+    for k, v := range src {
+        tgt[k] = v
+    }
+    return tgt
+}
+
+// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
+// insistence on parsing nested structures as `map[interface{}]interface{}`
+// instead of using a `string` as the key for nested structures beyond one level
+// deep. Both map types are supported as there is a go-yaml fork that uses
+// `map[string]interface{}` instead.
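+//
+// An illustrative example (values assumed, not upstream documentation):
+// merging src {"a": {"x": 1}, "b": 2} into tgt {"a": {"y": 3}} leaves tgt as
+// {"a": {"x": 1, "y": 3}, "b": 2}; a scalar key present in both maps keeps
+// the src value.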
+func mergeMaps( + src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { + for sk, sv := range src { + tk := keyExists(sk, tgt) + if tk == "" { + jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) + tgt[sk] = sv + if itgt != nil { + itgt[sk] = sv + } + continue + } + + tv, ok := tgt[tk] + if !ok { + jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) + tgt[sk] = sv + if itgt != nil { + itgt[sk] = sv + } + continue + } + + svType := reflect.TypeOf(sv) + tvType := reflect.TypeOf(tv) + if svType != tvType { + jww.ERROR.Printf( + "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + continue + } + + jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + + switch ttv := tv.(type) { + case map[interface{}]interface{}: + jww.TRACE.Printf("merging maps (must convert)") + tsv := sv.(map[interface{}]interface{}) + ssv := castToMapStringInterface(tsv) + stv := castToMapStringInterface(ttv) + mergeMaps(ssv, stv, ttv) + case map[string]interface{}: + jww.TRACE.Printf("merging maps") + mergeMaps(sv.(map[string]interface{}), ttv, nil) + default: + jww.TRACE.Printf("setting value") + tgt[tk] = sv + if itgt != nil { + itgt[tk] = sv + } + } + } +} + +// ReadRemoteConfig attempts to get configuration from a remote source +// and read it in the remote configuration registry. +func ReadRemoteConfig() error { return v.ReadRemoteConfig() } +func (v *Viper) ReadRemoteConfig() error { + return v.getKeyValueConfig() +} + +func WatchRemoteConfig() error { return v.WatchRemoteConfig() } +func (v *Viper) WatchRemoteConfig() error { + return v.watchKeyValueConfig() +} + +func (v *Viper) WatchRemoteConfigOnChannel() error { + return v.watchKeyValueConfigOnChannel() +} + +func (v *Viper) insensitiviseMaps() { + insensitiviseMap(v.config) + insensitiviseMap(v.defaults) + insensitiviseMap(v.override) + insensitiviseMap(v.kvstore) +} + +// Retrieve the first found remote configuration. +func (v *Viper) getKeyValueConfig() error { + if RemoteConfig == nil { + return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") + } + + for _, rp := range v.remoteProviders { + val, err := v.getRemoteConfig(rp) + if err != nil { + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { + reader, err := RemoteConfig.Get(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// Retrieve the first found remote configuration. +func (v *Viper) watchKeyValueConfigOnChannel() error { + for _, rp := range v.remoteProviders { + respc, _ := RemoteConfig.WatchChannel(rp) + //Todo: Add quit channel + go func(rc <-chan *RemoteResponse) { + for { + b := <-rc + reader := bytes.NewReader(b.Value) + v.unmarshalReader(reader, v.kvstore) + } + }(respc) + return nil + } + return RemoteConfigError("No Files Found") +} + +// Retrieve the first found remote configuration. 
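+//
+// From the caller's side, a minimal sketch of the remote flow (the endpoint
+// and path below are assumptions for illustration; remote support also needs
+// a blank import of github.com/spf13/viper/remote, as noted above):
+//
+//    _ = viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/myapp.json")
+//    viper.SetConfigType("json") // the type cannot be inferred from a k/v path
+//    if err := viper.ReadRemoteConfig(); err != nil {
+//        // no remote provider yielded a configuration
+//    }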
+func (v *Viper) watchKeyValueConfig() error { + for _, rp := range v.remoteProviders { + val, err := v.watchRemoteConfig(rp) + if err != nil { + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { + reader, err := RemoteConfig.Watch(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// AllKeys returns all keys holding a value, regardless of where they are set. +// Nested keys are returned with a v.keyDelim (= ".") separator +func AllKeys() []string { return v.AllKeys() } +func (v *Viper) AllKeys() []string { + m := map[string]bool{} + // add all paths, by order of descending priority to ensure correct shadowing + m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") + m = v.flattenAndMergeMap(m, v.override, "") + m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) + m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) + m = v.flattenAndMergeMap(m, v.config, "") + m = v.flattenAndMergeMap(m, v.kvstore, "") + m = v.flattenAndMergeMap(m, v.defaults, "") + + // convert set of paths to list + a := []string{} + for x := range m { + a = append(a, x) + } + return a +} + +// flattenAndMergeMap recursively flattens the given map into a map[string]bool +// of key paths (used as a set, easier to manipulate than a []string): +// - each path is merged into a single key string, delimited with v.keyDelim (= ".") +// - if a path is shadowed by an earlier value in the initial shadow map, +// it is skipped. +// The resulting set of paths is merged to the given shadow set at the same time. +func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { + if shadow != nil && prefix != "" && shadow[prefix] { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]bool) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += v.keyDelim + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = true + continue + } + // recursively merge to shadow map + shadow = v.flattenAndMergeMap(shadow, m2, fullKey) + } + return shadow +} + +// mergeFlatMap merges the given maps, excluding values of the second map +// shadowed by values from the first map. +func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { + // scan keys +outer: + for k, _ := range m { + path := strings.Split(k, v.keyDelim) + // scan intermediate paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if shadow[parentKey] { + // path is shadowed, continue + continue outer + } + } + // add key + shadow[strings.ToLower(k)] = true + } + return shadow +} + +// AllSettings merges all settings and returns them as a map[string]interface{}. 
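+//
+// For example (data assumed for illustration): with a YAML config of
+//
+//    clothing:
+//      jacket: leather
+//
+// AllKeys reports the leaf path "clothing.jacket", and AllSettings rebuilds
+// the nested map {"clothing": {"jacket": "leather"}}.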
+func AllSettings() map[string]interface{} { return v.AllSettings() } +func (v *Viper) AllSettings() map[string]interface{} { + m := map[string]interface{}{} + // start from the list of keys, and construct the map one value at a time + for _, k := range v.AllKeys() { + value := v.Get(k) + if value == nil { + // should not happen, since AllKeys() returns only keys holding a value, + // check just in case anything changes + continue + } + path := strings.Split(k, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(m, path[0:len(path)-1]) + // set innermost value + deepestMap[lastKey] = value + } + return m +} + +// SetFs sets the filesystem to use to read configuration. +func SetFs(fs afero.Fs) { v.SetFs(fs) } +func (v *Viper) SetFs(fs afero.Fs) { + v.fs = fs +} + +// SetConfigName sets name for the config file. +// Does not include extension. +func SetConfigName(in string) { v.SetConfigName(in) } +func (v *Viper) SetConfigName(in string) { + if in != "" { + v.configName = in + v.configFile = "" + } +} + +// SetConfigType sets the type of the configuration returned by the +// remote source, e.g. "json". +func SetConfigType(in string) { v.SetConfigType(in) } +func (v *Viper) SetConfigType(in string) { + if in != "" { + v.configType = in + } +} + +func (v *Viper) getConfigType() string { + if v.configType != "" { + return v.configType + } + + cf, err := v.getConfigFile() + if err != nil { + return "" + } + + ext := filepath.Ext(cf) + + if len(ext) > 1 { + return ext[1:] + } + + return "" +} + +func (v *Viper) getConfigFile() (string, error) { + if v.configFile == "" { + cf, err := v.findConfigFile() + if err != nil { + return "", err + } + v.configFile = cf + } + return v.configFile, nil +} + +func (v *Viper) searchInPath(in string) (filename string) { + jww.DEBUG.Println("Searching for config in ", in) + for _, ext := range SupportedExts { + jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { + jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) + return filepath.Join(in, v.configName+"."+ext) + } + } + + return "" +} + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + jww.INFO.Println("Searching for config in ", v.configPaths) + + for _, cp := range v.configPaths { + file := v.searchInPath(cp) + if file != "" { + return file, nil + } + } + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} +} + +// Debug prints all configuration registries for debugging +// purposes. +func Debug() { v.Debug() } +func (v *Viper) Debug() { + fmt.Printf("Aliases:\n%#v\n", v.aliases) + fmt.Printf("Override:\n%#v\n", v.override) + fmt.Printf("PFlags:\n%#v\n", v.pflags) + fmt.Printf("Env:\n%#v\n", v.env) + fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) + fmt.Printf("Config:\n%#v\n", v.config) + fmt.Printf("Defaults:\n%#v\n", v.defaults) +} diff --git a/vendor/github.com/spf13/viper/viper_test.go b/vendor/github.com/spf13/viper/viper_test.go new file mode 100644 index 000000000..c93480eab --- /dev/null +++ b/vendor/github.com/spf13/viper/viper_test.go @@ -0,0 +1,1406 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
+ +package viper + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/spf13/afero" + "github.com/spf13/cast" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +var yamlExample = []byte(`Hacker: true +name: steve +hobbies: +- skateboarding +- snowboarding +- go +clothing: + jacket: leather + trousers: denim + pants: + size: large +age: 35 +eyes : brown +beard: true +`) + +var yamlExampleWithExtras = []byte(`Existing: true +Bogus: true +`) + +type testUnmarshalExtra struct { + Existing bool +} + +var tomlExample = []byte(` +title = "TOML Example" + +[owner] +organization = "MongoDB" +Bio = "MongoDB Chief Developer Advocate & Hacker at Large" +dob = 1979-05-27T07:32:00Z # First class dates? Why not?`) + +var jsonExample = []byte(`{ +"id": "0001", +"type": "donut", +"name": "Cake", +"ppu": 0.55, +"batters": { + "batter": [ + { "type": "Regular" }, + { "type": "Chocolate" }, + { "type": "Blueberry" }, + { "type": "Devil's Food" } + ] + } +}`) + +var hclExample = []byte(` +id = "0001" +type = "donut" +name = "Cake" +ppu = 0.55 +foos { + foo { + key = 1 + } + foo { + key = 2 + } + foo { + key = 3 + } + foo { + key = 4 + } +}`) + +var propertiesExample = []byte(` +p_id: 0001 +p_type: donut +p_name: Cake +p_ppu: 0.55 +p_batters.batter.type: Regular +`) + +var remoteExample = []byte(`{ +"id":"0002", +"type":"cronut", +"newkey":"remote" +}`) + +func initConfigs() { + Reset() + var r io.Reader + SetConfigType("yaml") + r = bytes.NewReader(yamlExample) + unmarshalReader(r, v.config) + + SetConfigType("json") + r = bytes.NewReader(jsonExample) + unmarshalReader(r, v.config) + + SetConfigType("hcl") + r = bytes.NewReader(hclExample) + unmarshalReader(r, v.config) + + SetConfigType("properties") + r = bytes.NewReader(propertiesExample) + unmarshalReader(r, v.config) + + SetConfigType("toml") + r = bytes.NewReader(tomlExample) + unmarshalReader(r, v.config) + + SetConfigType("json") + remote := bytes.NewReader(remoteExample) + unmarshalReader(remote, v.kvstore) +} + +func initConfig(typ, config string) { + Reset() + SetConfigType(typ) + r := strings.NewReader(config) + + if err := unmarshalReader(r, v.config); err != nil { + panic(err) + } +} + +func initYAML() { + initConfig("yaml", string(yamlExample)) +} + +func initJSON() { + Reset() + SetConfigType("json") + r := bytes.NewReader(jsonExample) + + unmarshalReader(r, v.config) +} + +func initProperties() { + Reset() + SetConfigType("properties") + r := bytes.NewReader(propertiesExample) + + unmarshalReader(r, v.config) +} + +func initTOML() { + Reset() + SetConfigType("toml") + r := bytes.NewReader(tomlExample) + + unmarshalReader(r, v.config) +} + +func initHcl() { + Reset() + SetConfigType("hcl") + r := bytes.NewReader(hclExample) + + unmarshalReader(r, v.config) +} + +// make directories for testing +func initDirs(t *testing.T) (string, string, func()) { + + var ( + testDirs = []string{`a a`, `b`, `c\c`, `D_`} + config = `improbable` + ) + + root, err := ioutil.TempDir("", "") + + cleanup := true + defer func() { + if cleanup { + os.Chdir("..") + os.RemoveAll(root) + } + }() + + assert.Nil(t, err) + + err = os.Chdir(root) + assert.Nil(t, err) + + for _, dir := range testDirs { + err = os.Mkdir(dir, 0750) + assert.Nil(t, err) + + err = ioutil.WriteFile( + path.Join(dir, config+".toml"), + []byte("key = \"value is "+dir+"\"\n"), + 0640) + assert.Nil(t, err) + } + + cleanup = false + return root, config, func() { + os.Chdir("..") + 
os.RemoveAll(root) + } +} + +//stubs for PFlag Values +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { + return fmt.Sprintf("%s", *s) +} + +func TestBasics(t *testing.T) { + SetConfigFile("/tmp/config.yaml") + filename, err := v.getConfigFile() + assert.Equal(t, "/tmp/config.yaml", filename) + assert.NoError(t, err) +} + +func TestDefault(t *testing.T) { + SetDefault("age", 45) + assert.Equal(t, 45, Get("age")) + + SetDefault("clothing.jacket", "slacks") + assert.Equal(t, "slacks", Get("clothing.jacket")) + + SetConfigType("yaml") + err := ReadConfig(bytes.NewBuffer(yamlExample)) + + assert.NoError(t, err) + assert.Equal(t, "leather", Get("clothing.jacket")) +} + +func TestUnmarshaling(t *testing.T) { + SetConfigType("yaml") + r := bytes.NewReader(yamlExample) + + unmarshalReader(r, v.config) + assert.True(t, InConfig("name")) + assert.False(t, InConfig("state")) + assert.Equal(t, "steve", Get("name")) + assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, Get("hobbies")) + assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, Get("clothing")) + assert.Equal(t, 35, Get("age")) +} + +func TestUnmarshalExact(t *testing.T) { + vip := New() + target := &testUnmarshalExtra{} + vip.SetConfigType("yaml") + r := bytes.NewReader(yamlExampleWithExtras) + vip.ReadConfig(r) + err := vip.UnmarshalExact(target) + if err == nil { + t.Fatal("UnmarshalExact should error when populating a struct from a conf that contains unused fields") + } +} + +func TestOverrides(t *testing.T) { + Set("age", 40) + assert.Equal(t, 40, Get("age")) +} + +func TestDefaultPost(t *testing.T) { + assert.NotEqual(t, "NYC", Get("state")) + SetDefault("state", "NYC") + assert.Equal(t, "NYC", Get("state")) +} + +func TestAliases(t *testing.T) { + RegisterAlias("years", "age") + assert.Equal(t, 40, Get("years")) + Set("years", 45) + assert.Equal(t, 45, Get("age")) +} + +func TestAliasInConfigFile(t *testing.T) { + // the config file specifies "beard". If we make this an alias for + // "hasbeard", we still want the old config file to work with beard. 
+ RegisterAlias("beard", "hasbeard") + assert.Equal(t, true, Get("hasbeard")) + Set("hasbeard", false) + assert.Equal(t, false, Get("beard")) +} + +func TestYML(t *testing.T) { + initYAML() + assert.Equal(t, "steve", Get("name")) +} + +func TestJSON(t *testing.T) { + initJSON() + assert.Equal(t, "0001", Get("id")) +} + +func TestProperties(t *testing.T) { + initProperties() + assert.Equal(t, "0001", Get("p_id")) +} + +func TestTOML(t *testing.T) { + initTOML() + assert.Equal(t, "TOML Example", Get("title")) +} + +func TestHCL(t *testing.T) { + initHcl() + assert.Equal(t, "0001", Get("id")) + assert.Equal(t, 0.55, Get("ppu")) + assert.Equal(t, "donut", Get("type")) + assert.Equal(t, "Cake", Get("name")) + Set("id", "0002") + assert.Equal(t, "0002", Get("id")) + assert.NotEqual(t, "cronut", Get("type")) +} + +func TestRemotePrecedence(t *testing.T) { + initJSON() + + remote := bytes.NewReader(remoteExample) + assert.Equal(t, "0001", Get("id")) + unmarshalReader(remote, v.kvstore) + assert.Equal(t, "0001", Get("id")) + assert.NotEqual(t, "cronut", Get("type")) + assert.Equal(t, "remote", Get("newkey")) + Set("newkey", "newvalue") + assert.NotEqual(t, "remote", Get("newkey")) + assert.Equal(t, "newvalue", Get("newkey")) + Set("newkey", "remote") +} + +func TestEnv(t *testing.T) { + initJSON() + + BindEnv("id") + BindEnv("f", "FOOD") + + os.Setenv("ID", "13") + os.Setenv("FOOD", "apple") + os.Setenv("NAME", "crunk") + + assert.Equal(t, "13", Get("id")) + assert.Equal(t, "apple", Get("f")) + assert.Equal(t, "Cake", Get("name")) + + AutomaticEnv() + + assert.Equal(t, "crunk", Get("name")) + +} + +func TestEnvPrefix(t *testing.T) { + initJSON() + + SetEnvPrefix("foo") // will be uppercased automatically + BindEnv("id") + BindEnv("f", "FOOD") // not using prefix + + os.Setenv("FOO_ID", "13") + os.Setenv("FOOD", "apple") + os.Setenv("FOO_NAME", "crunk") + + assert.Equal(t, "13", Get("id")) + assert.Equal(t, "apple", Get("f")) + assert.Equal(t, "Cake", Get("name")) + + AutomaticEnv() + + assert.Equal(t, "crunk", Get("name")) +} + +func TestAutoEnv(t *testing.T) { + Reset() + + AutomaticEnv() + os.Setenv("FOO_BAR", "13") + assert.Equal(t, "13", Get("foo_bar")) +} + +func TestAutoEnvWithPrefix(t *testing.T) { + Reset() + + AutomaticEnv() + SetEnvPrefix("Baz") + os.Setenv("BAZ_BAR", "13") + assert.Equal(t, "13", Get("bar")) +} + +func TestSetEnvKeyReplacer(t *testing.T) { + Reset() + + AutomaticEnv() + os.Setenv("REFRESH_INTERVAL", "30s") + + replacer := strings.NewReplacer("-", "_") + SetEnvKeyReplacer(replacer) + + assert.Equal(t, "30s", Get("refresh-interval")) +} + +func TestAllKeys(t *testing.T) { + initConfigs() + + ks := sort.StringSlice{"title", "newkey", "owner.organization", "owner.dob", "owner.bio", "name", "beard", "ppu", "batters.batter", "hobbies", "clothing.jacket", "clothing.trousers", "clothing.pants.size", "age", "hacker", "id", "type", "eyes", "p_id", "p_ppu", "p_batters.batter.type", "p_type", "p_name", "foos"} + dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z") + all := map[string]interface{}{"owner": map[string]interface{}{"organization": "MongoDB", "bio": "MongoDB Chief Developer Advocate & Hacker at Large", "dob": dob}, "title": "TOML Example", "ppu": 0.55, "eyes": "brown", "clothing": map[string]interface{}{"trousers": "denim", "jacket": "leather", "pants": map[string]interface{}{"size": "large"}}, "id": "0001", "batters": map[string]interface{}{"batter": []interface{}{map[string]interface{}{"type": "Regular"}, map[string]interface{}{"type": "Chocolate"}, 
map[string]interface{}{"type": "Blueberry"}, map[string]interface{}{"type": "Devil's Food"}}}, "hacker": true, "beard": true, "hobbies": []interface{}{"skateboarding", "snowboarding", "go"}, "age": 35, "type": "donut", "newkey": "remote", "name": "Cake", "p_id": "0001", "p_ppu": "0.55", "p_name": "Cake", "p_batters": map[string]interface{}{"batter": map[string]interface{}{"type": "Regular"}}, "p_type": "donut", "foos": []map[string]interface{}{map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"key": 1}, map[string]interface{}{"key": 2}, map[string]interface{}{"key": 3}, map[string]interface{}{"key": 4}}}}} + + var allkeys sort.StringSlice + allkeys = AllKeys() + allkeys.Sort() + ks.Sort() + + assert.Equal(t, ks, allkeys) + assert.Equal(t, all, AllSettings()) +} + +func TestAllKeysWithEnv(t *testing.T) { + v := New() + + // bind and define environment variables (including a nested one) + v.BindEnv("id") + v.BindEnv("foo.bar") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + os.Setenv("ID", "13") + os.Setenv("FOO_BAR", "baz") + + expectedKeys := sort.StringSlice{"id", "foo.bar"} + expectedKeys.Sort() + keys := sort.StringSlice(v.AllKeys()) + keys.Sort() + assert.Equal(t, expectedKeys, keys) +} + +func TestAliasesOfAliases(t *testing.T) { + Set("Title", "Checking Case") + RegisterAlias("Foo", "Bar") + RegisterAlias("Bar", "Title") + assert.Equal(t, "Checking Case", Get("FOO")) +} + +func TestRecursiveAliases(t *testing.T) { + RegisterAlias("Baz", "Roo") + RegisterAlias("Roo", "baz") +} + +func TestUnmarshal(t *testing.T) { + SetDefault("port", 1313) + Set("name", "Steve") + Set("duration", "1s1ms") + + type config struct { + Port int + Name string + Duration time.Duration + } + + var C config + + err := Unmarshal(&C) + if err != nil { + t.Fatalf("unable to decode into struct, %v", err) + } + + assert.Equal(t, &config{Name: "Steve", Port: 1313, Duration: time.Second + time.Millisecond}, &C) + + Set("port", 1234) + err = Unmarshal(&C) + if err != nil { + t.Fatalf("unable to decode into struct, %v", err) + } + assert.Equal(t, &config{Name: "Steve", Port: 1234, Duration: time.Second + time.Millisecond}, &C) +} + +func TestBindPFlags(t *testing.T) { + v := New() // create independent Viper object + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var testValues = map[string]*string{ + "host": nil, + "port": nil, + "endpoint": nil, + } + + var mutatedTestValues = map[string]string{ + "host": "localhost", + "port": "6060", + "endpoint": "/public", + } + + for name := range testValues { + testValues[name] = flagSet.String(name, "", "test") + } + + err := v.BindPFlags(flagSet) + if err != nil { + t.Fatalf("error binding flag set, %v", err) + } + + flagSet.VisitAll(func(flag *pflag.Flag) { + flag.Value.Set(mutatedTestValues[flag.Name]) + flag.Changed = true + }) + + for name, expected := range mutatedTestValues { + assert.Equal(t, expected, v.Get(name)) + } + +} + +func TestBindPFlagsStringSlice(t *testing.T) { + for _, testValue := range []struct { + Expected []string + Value string + }{ + {[]string{}, ""}, + {[]string{"jeden"}, "jeden"}, + {[]string{"dwa", "trzy"}, "dwa,trzy"}, + {[]string{"cztery", "piec , szesc"}, "cztery,\"piec , szesc\""}} { + + for _, changed := range []bool{true, false} { + v := New() // create independent Viper object + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.StringSlice("stringslice", testValue.Expected, "test") + flagSet.Visit(func(f *pflag.Flag) { + if len(testValue.Value) > 0 { + 
f.Value.Set(testValue.Value) + f.Changed = changed + } + }) + + err := v.BindPFlags(flagSet) + if err != nil { + t.Fatalf("error binding flag set, %v", err) + } + + type TestStr struct { + StringSlice []string + } + val := &TestStr{} + if err := v.Unmarshal(val); err != nil { + t.Fatalf("%+#v cannot unmarshal: %s", testValue.Value, err) + } + assert.Equal(t, testValue.Expected, val.StringSlice) + } + } +} + +func TestBindPFlag(t *testing.T) { + var testString = "testing" + var testValue = newStringValue(testString, &testString) + + flag := &pflag.Flag{ + Name: "testflag", + Value: testValue, + Changed: false, + } + + BindPFlag("testvalue", flag) + + assert.Equal(t, testString, Get("testvalue")) + + flag.Value.Set("testing_mutate") + flag.Changed = true //hack for pflag usage + + assert.Equal(t, "testing_mutate", Get("testvalue")) + +} + +func TestBoundCaseSensitivity(t *testing.T) { + assert.Equal(t, "brown", Get("eyes")) + + BindEnv("eYEs", "TURTLE_EYES") + os.Setenv("TURTLE_EYES", "blue") + + assert.Equal(t, "blue", Get("eyes")) + + var testString = "green" + var testValue = newStringValue(testString, &testString) + + flag := &pflag.Flag{ + Name: "eyeballs", + Value: testValue, + Changed: true, + } + + BindPFlag("eYEs", flag) + assert.Equal(t, "green", Get("eyes")) + +} + +func TestSizeInBytes(t *testing.T) { + input := map[string]uint{ + "": 0, + "b": 0, + "12 bytes": 0, + "200000000000gb": 0, + "12 b": 12, + "43 MB": 43 * (1 << 20), + "10mb": 10 * (1 << 20), + "1gb": 1 << 30, + } + + for str, expected := range input { + assert.Equal(t, expected, parseSizeInBytes(str), str) + } +} + +func TestFindsNestedKeys(t *testing.T) { + initConfigs() + dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z") + + Set("super", map[string]interface{}{ + "deep": map[string]interface{}{ + "nested": "value", + }, + }) + + expected := map[string]interface{}{ + "super": map[string]interface{}{ + "deep": map[string]interface{}{ + "nested": "value", + }, + }, + "super.deep": map[string]interface{}{ + "nested": "value", + }, + "super.deep.nested": "value", + "owner.organization": "MongoDB", + "batters.batter": []interface{}{ + map[string]interface{}{ + "type": "Regular", + }, + map[string]interface{}{ + "type": "Chocolate", + }, + map[string]interface{}{ + "type": "Blueberry", + }, + map[string]interface{}{ + "type": "Devil's Food", + }, + }, + "hobbies": []interface{}{ + "skateboarding", "snowboarding", "go", + }, + "title": "TOML Example", + "newkey": "remote", + "batters": map[string]interface{}{ + "batter": []interface{}{ + map[string]interface{}{ + "type": "Regular", + }, + map[string]interface{}{ + "type": "Chocolate", + }, map[string]interface{}{ + "type": "Blueberry", + }, map[string]interface{}{ + "type": "Devil's Food", + }, + }, + }, + "eyes": "brown", + "age": 35, + "owner": map[string]interface{}{ + "organization": "MongoDB", + "bio": "MongoDB Chief Developer Advocate & Hacker at Large", + "dob": dob, + }, + "owner.bio": "MongoDB Chief Developer Advocate & Hacker at Large", + "type": "donut", + "id": "0001", + "name": "Cake", + "hacker": true, + "ppu": 0.55, + "clothing": map[string]interface{}{ + "jacket": "leather", + "trousers": "denim", + "pants": map[string]interface{}{ + "size": "large", + }, + }, + "clothing.jacket": "leather", + "clothing.pants.size": "large", + "clothing.trousers": "denim", + "owner.dob": dob, + "beard": true, + "foos": []map[string]interface{}{ + map[string]interface{}{ + "foo": []map[string]interface{}{ + map[string]interface{}{ + "key": 1, + }, + 
map[string]interface{}{ + "key": 2, + }, + map[string]interface{}{ + "key": 3, + }, + map[string]interface{}{ + "key": 4, + }, + }, + }, + }, + } + + for key, expectedValue := range expected { + + assert.Equal(t, expectedValue, v.Get(key)) + } + +} + +func TestReadBufConfig(t *testing.T) { + v := New() + v.SetConfigType("yaml") + v.ReadConfig(bytes.NewBuffer(yamlExample)) + t.Log(v.AllKeys()) + + assert.True(t, v.InConfig("name")) + assert.False(t, v.InConfig("state")) + assert.Equal(t, "steve", v.Get("name")) + assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, v.Get("hobbies")) + assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, v.Get("clothing")) + assert.Equal(t, 35, v.Get("age")) +} + +func TestIsSet(t *testing.T) { + v := New() + v.SetConfigType("yaml") + v.ReadConfig(bytes.NewBuffer(yamlExample)) + assert.True(t, v.IsSet("clothing.jacket")) + assert.False(t, v.IsSet("clothing.jackets")) + assert.False(t, v.IsSet("helloworld")) + v.Set("helloworld", "fubar") + assert.True(t, v.IsSet("helloworld")) +} + +func TestDirsSearch(t *testing.T) { + + root, config, cleanup := initDirs(t) + defer cleanup() + + v := New() + v.SetConfigName(config) + v.SetDefault(`key`, `default`) + + entries, err := ioutil.ReadDir(root) + for _, e := range entries { + if e.IsDir() { + v.AddConfigPath(e.Name()) + } + } + + err = v.ReadInConfig() + assert.Nil(t, err) + + assert.Equal(t, `value is `+path.Base(v.configPaths[0]), v.GetString(`key`)) +} + +func TestWrongDirsSearchNotFound(t *testing.T) { + + _, config, cleanup := initDirs(t) + defer cleanup() + + v := New() + v.SetConfigName(config) + v.SetDefault(`key`, `default`) + + v.AddConfigPath(`whattayoutalkingbout`) + v.AddConfigPath(`thispathaintthere`) + + err := v.ReadInConfig() + assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err)) + + // Even though config did not load and the error might have + // been ignored by the client, the default still loads + assert.Equal(t, `default`, v.GetString(`key`)) +} + +func TestWrongDirsSearchNotFoundForMerge(t *testing.T) { + + _, config, cleanup := initDirs(t) + defer cleanup() + + v := New() + v.SetConfigName(config) + v.SetDefault(`key`, `default`) + + v.AddConfigPath(`whattayoutalkingbout`) + v.AddConfigPath(`thispathaintthere`) + + err := v.MergeInConfig() + assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err)) + + // Even though config did not load and the error might have + // been ignored by the client, the default still loads + assert.Equal(t, `default`, v.GetString(`key`)) +} + +func TestSub(t *testing.T) { + v := New() + v.SetConfigType("yaml") + v.ReadConfig(bytes.NewBuffer(yamlExample)) + + subv := v.Sub("clothing") + assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("pants.size")) + + subv = v.Sub("clothing.pants") + assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("size")) + + subv = v.Sub("clothing.pants.size") + assert.Equal(t, (*Viper)(nil), subv) + + subv = v.Sub("missing.key") + assert.Equal(t, (*Viper)(nil), subv) +} + +var hclWriteExpected = []byte(`"foos" = { + "foo" = { + "key" = 1 + } + + "foo" = { + "key" = 2 + } + + "foo" = { + "key" = 3 + } + + "foo" = { + "key" = 4 + } +} + +"id" = "0001" + +"name" = "Cake" + +"ppu" = 0.55 + +"type" = "donut"`) + +func TestWriteConfigHCL(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("hcl") + err := 
v.ReadConfig(bytes.NewBuffer(hclExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.hcl"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.hcl") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, hclWriteExpected, read) +} + +var jsonWriteExpected = []byte(`{ + "batters": { + "batter": [ + { + "type": "Regular" + }, + { + "type": "Chocolate" + }, + { + "type": "Blueberry" + }, + { + "type": "Devil's Food" + } + ] + }, + "id": "0001", + "name": "Cake", + "ppu": 0.55, + "type": "donut" +}`) + +func TestWriteConfigJson(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("json") + err := v.ReadConfig(bytes.NewBuffer(jsonExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.json"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.json") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, jsonWriteExpected, read) +} + +var propertiesWriteExpected = []byte(`p_id = 0001 +p_type = donut +p_name = Cake +p_ppu = 0.55 +p_batters.batter.type = Regular +`) + +func TestWriteConfigProperties(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("properties") + err := v.ReadConfig(bytes.NewBuffer(propertiesExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.properties"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.properties") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, propertiesWriteExpected, read) +} + +func TestWriteConfigTOML(t *testing.T) { + fs := afero.NewMemMapFs() + v := New() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("toml") + err := v.ReadConfig(bytes.NewBuffer(tomlExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.toml"); err != nil { + t.Fatal(err) + } + + // The TOML String method does not order the contents. + // Therefore, we must read the generated file and compare the data. 
+ v2 := New() + v2.SetFs(fs) + v2.SetConfigName("c") + v2.SetConfigType("toml") + v2.SetConfigFile("c.toml") + err = v2.ReadInConfig() + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, v.GetString("title"), v2.GetString("title")) + assert.Equal(t, v.GetString("owner.bio"), v2.GetString("owner.bio")) + assert.Equal(t, v.GetString("owner.dob"), v2.GetString("owner.dob")) + assert.Equal(t, v.GetString("owner.organization"), v2.GetString("owner.organization")) +} + +var yamlWriteExpected = []byte(`age: 35 +beard: true +clothing: + jacket: leather + pants: + size: large + trousers: denim +eyes: brown +hacker: true +hobbies: +- skateboarding +- snowboarding +- go +name: steve +`) + +func TestWriteConfigYAML(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("yaml") + err := v.ReadConfig(bytes.NewBuffer(yamlExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.yaml"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.yaml") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, yamlWriteExpected, read) +} + +var yamlMergeExampleTgt = []byte(` +hello: + pop: 37890 + lagrenum: 765432101234567 + world: + - us + - uk + - fr + - de +`) + +var yamlMergeExampleSrc = []byte(` +hello: + pop: 45000 + lagrenum: 7654321001234567 + universe: + - mw + - ad +fu: bar +`) + +func TestMergeConfig(t *testing.T) { + v := New() + v.SetConfigType("yml") + if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleTgt)); err != nil { + t.Fatal(err) + } + + if pop := v.GetInt("hello.pop"); pop != 37890 { + t.Fatalf("pop != 37890, = %d", pop) + } + + if pop := v.GetInt("hello.lagrenum"); pop != 765432101234567 { + t.Fatalf("lagrenum != 765432101234567, = %d", pop) + } + + if pop := v.GetInt64("hello.lagrenum"); pop != int64(765432101234567) { + t.Fatalf("int64 lagrenum != 765432101234567, = %d", pop) + } + + if world := v.GetStringSlice("hello.world"); len(world) != 4 { + t.Fatalf("len(world) != 4, = %d", len(world)) + } + + if fu := v.GetString("fu"); fu != "" { + t.Fatalf("fu != \"\", = %s", fu) + } + + if err := v.MergeConfig(bytes.NewBuffer(yamlMergeExampleSrc)); err != nil { + t.Fatal(err) + } + + if pop := v.GetInt("hello.pop"); pop != 45000 { + t.Fatalf("pop != 45000, = %d", pop) + } + + if pop := v.GetInt("hello.lagrenum"); pop != 7654321001234567 { + t.Fatalf("lagrenum != 7654321001234567, = %d", pop) + } + + if pop := v.GetInt64("hello.lagrenum"); pop != int64(7654321001234567) { + t.Fatalf("int64 lagrenum != 7654321001234567, = %d", pop) + } + + if world := v.GetStringSlice("hello.world"); len(world) != 4 { + t.Fatalf("len(world) != 4, = %d", len(world)) + } + + if universe := v.GetStringSlice("hello.universe"); len(universe) != 2 { + t.Fatalf("len(universe) != 2, = %d", len(universe)) + } + + if fu := v.GetString("fu"); fu != "bar" { + t.Fatalf("fu != \"bar\", = %s", fu) + } +} + +func TestMergeConfigNoMerge(t *testing.T) { + v := New() + v.SetConfigType("yml") + if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleTgt)); err != nil { + t.Fatal(err) + } + + if pop := v.GetInt("hello.pop"); pop != 37890 { + t.Fatalf("pop != 37890, = %d", pop) + } + + if world := v.GetStringSlice("hello.world"); len(world) != 4 { + t.Fatalf("len(world) != 4, = %d", len(world)) + } + + if fu := v.GetString("fu"); fu != "" { + t.Fatalf("fu != \"\", = %s", fu) + } + + if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleSrc)); err != nil { + t.Fatal(err) + } + + if pop := v.GetInt("hello.pop"); pop != 45000 { + 
t.Fatalf("pop != 45000, = %d", pop) + } + + if world := v.GetStringSlice("hello.world"); len(world) != 0 { + t.Fatalf("len(world) != 0, = %d", len(world)) + } + + if universe := v.GetStringSlice("hello.universe"); len(universe) != 2 { + t.Fatalf("len(universe) != 2, = %d", len(universe)) + } + + if fu := v.GetString("fu"); fu != "bar" { + t.Fatalf("fu != \"bar\", = %s", fu) + } +} + +func TestUnmarshalingWithAliases(t *testing.T) { + v := New() + v.SetDefault("ID", 1) + v.Set("name", "Steve") + v.Set("lastname", "Owen") + + v.RegisterAlias("UserID", "ID") + v.RegisterAlias("Firstname", "name") + v.RegisterAlias("Surname", "lastname") + + type config struct { + ID int + FirstName string + Surname string + } + + var C config + err := v.Unmarshal(&C) + if err != nil { + t.Fatalf("unable to decode into struct, %v", err) + } + + assert.Equal(t, &config{ID: 1, FirstName: "Steve", Surname: "Owen"}, &C) +} + +func TestSetConfigNameClearsFileCache(t *testing.T) { + SetConfigFile("/tmp/config.yaml") + SetConfigName("default") + f, err := v.getConfigFile() + if err == nil { + t.Fatalf("config file cache should have been cleared") + } + assert.Empty(t, f) +} + +func TestShadowedNestedValue(t *testing.T) { + + config := `name: steve +clothing: + jacket: leather + trousers: denim + pants: + size: large +` + initConfig("yaml", config) + + assert.Equal(t, "steve", GetString("name")) + + polyester := "polyester" + SetDefault("clothing.shirt", polyester) + SetDefault("clothing.jacket.price", 100) + + assert.Equal(t, "leather", GetString("clothing.jacket")) + assert.Nil(t, Get("clothing.jacket.price")) + assert.Equal(t, polyester, GetString("clothing.shirt")) + + clothingSettings := AllSettings()["clothing"].(map[string]interface{}) + assert.Equal(t, "leather", clothingSettings["jacket"]) + assert.Equal(t, polyester, clothingSettings["shirt"]) +} + +func TestDotParameter(t *testing.T) { + initJSON() + // shoud take precedence over batters defined in jsonExample + r := bytes.NewReader([]byte(`{ "batters.batter": [ { "type": "Small" } ] }`)) + unmarshalReader(r, v.config) + + actual := Get("batters.batter") + expected := []interface{}{map[string]interface{}{"type": "Small"}} + assert.Equal(t, expected, actual) +} + +func TestCaseInsensitive(t *testing.T) { + for _, config := range []struct { + typ string + content string + }{ + {"yaml", ` +aBcD: 1 +eF: + gH: 2 + iJk: 3 + Lm: + nO: 4 + P: + Q: 5 + R: 6 +`}, + {"json", `{ + "aBcD": 1, + "eF": { + "iJk": 3, + "Lm": { + "P": { + "Q": 5, + "R": 6 + }, + "nO": 4 + }, + "gH": 2 + } +}`}, + {"toml", `aBcD = 1 +[eF] +gH = 2 +iJk = 3 +[eF.Lm] +nO = 4 +[eF.Lm.P] +Q = 5 +R = 6 +`}, + } { + doTestCaseInsensitive(t, config.typ, config.content) + } +} + +func TestCaseInsensitiveSet(t *testing.T) { + Reset() + m1 := map[string]interface{}{ + "Foo": 32, + "Bar": map[interface{}]interface { + }{ + "ABc": "A", + "cDE": "B"}, + } + + m2 := map[string]interface{}{ + "Foo": 52, + "Bar": map[interface{}]interface { + }{ + "bCd": "A", + "eFG": "B"}, + } + + Set("Given1", m1) + Set("Number1", 42) + + SetDefault("Given2", m2) + SetDefault("Number2", 52) + + // Verify SetDefault + if v := Get("number2"); v != 52 { + t.Fatalf("Expected 52 got %q", v) + } + + if v := Get("given2.foo"); v != 52 { + t.Fatalf("Expected 52 got %q", v) + } + + if v := Get("given2.bar.bcd"); v != "A" { + t.Fatalf("Expected A got %q", v) + } + + if _, ok := m2["Foo"]; !ok { + t.Fatal("Input map changed") + } + + // Verify Set + if v := Get("number1"); v != 42 { + t.Fatalf("Expected 42 got %q", v) + } + + if v := 
Get("given1.foo"); v != 32 { + t.Fatalf("Expected 32 got %q", v) + } + + if v := Get("given1.bar.abc"); v != "A" { + t.Fatalf("Expected A got %q", v) + } + + if _, ok := m1["Foo"]; !ok { + t.Fatal("Input map changed") + } +} + +func TestParseNested(t *testing.T) { + type duration struct { + Delay time.Duration + } + + type item struct { + Name string + Delay time.Duration + Nested duration + } + + config := `[[parent]] + delay="100ms" + [parent.nested] + delay="200ms" +` + initConfig("toml", config) + + var items []item + err := v.UnmarshalKey("parent", &items) + if err != nil { + t.Fatalf("unable to decode into struct, %v", err) + } + + assert.Equal(t, 1, len(items)) + assert.Equal(t, 100*time.Millisecond, items[0].Delay) + assert.Equal(t, 200*time.Millisecond, items[0].Nested.Delay) +} + +func doTestCaseInsensitive(t *testing.T, typ, config string) { + initConfig(typ, config) + Set("RfD", true) + assert.Equal(t, true, Get("rfd")) + assert.Equal(t, true, Get("rFD")) + assert.Equal(t, 1, cast.ToInt(Get("abcd"))) + assert.Equal(t, 1, cast.ToInt(Get("Abcd"))) + assert.Equal(t, 2, cast.ToInt(Get("ef.gh"))) + assert.Equal(t, 3, cast.ToInt(Get("ef.ijk"))) + assert.Equal(t, 4, cast.ToInt(Get("ef.lm.no"))) + assert.Equal(t, 5, cast.ToInt(Get("ef.lm.p.q"))) + +} + +func BenchmarkGetBool(b *testing.B) { + key := "BenchmarkGetBool" + v = New() + v.Set(key, true) + + for i := 0; i < b.N; i++ { + if !v.GetBool(key) { + b.Fatal("GetBool returned false") + } + } +} + +func BenchmarkGet(b *testing.B) { + key := "BenchmarkGet" + v = New() + v.Set(key, true) + + for i := 0; i < b.N; i++ { + if !v.Get(key).(bool) { + b.Fatal("Get returned false") + } + } +} + +// This is the "perfect result" for the above. +func BenchmarkGetBoolFromMap(b *testing.B) { + m := make(map[string]bool) + key := "BenchmarkGetBool" + m[key] = true + + for i := 0; i < b.N; i++ { + if !m[key] { + b.Fatal("Map value was false") + } + } +} diff --git a/vendor/github.com/stretchr/testify/.gitignore b/vendor/github.com/stretchr/testify/.gitignore new file mode 100644 index 000000000..5aacdb7cc --- /dev/null +++ b/vendor/github.com/stretchr/testify/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.DS_Store diff --git a/vendor/github.com/stretchr/testify/.travis.gofmt.sh b/vendor/github.com/stretchr/testify/.travis.gofmt.sh new file mode 100755 index 000000000..bfffdca8b --- /dev/null +++ b/vendor/github.com/stretchr/testify/.travis.gofmt.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -n "$(gofmt -l .)" ]; then + echo "Go code is not formatted:" + gofmt -d . + exit 1 +fi diff --git a/vendor/github.com/stretchr/testify/.travis.gogenerate.sh b/vendor/github.com/stretchr/testify/.travis.gogenerate.sh new file mode 100755 index 000000000..161b449cd --- /dev/null +++ b/vendor/github.com/stretchr/testify/.travis.gogenerate.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [[ "$TRAVIS_GO_VERSION" =~ ^1\.[45](\..*)?$ ]]; then + exit 0 +fi + +go get github.com/ernesto-jimenez/gogen/imports +go generate ./... 
+if [ -n "$(git diff)" ]; then
+    echo "go generate has not been run"
+    git diff
+    exit 1
+fi
diff --git a/vendor/github.com/stretchr/testify/.travis.govet.sh b/vendor/github.com/stretchr/testify/.travis.govet.sh
new file mode 100755
index 000000000..f8fbba7a1
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/.travis.govet.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+DIRS=". assert require mock _codegen"
+set -e
+for subdir in $DIRS; do
+    pushd $subdir
+    go vet
+    popd
+done
diff --git a/vendor/github.com/stretchr/testify/.travis.yml b/vendor/github.com/stretchr/testify/.travis.yml
new file mode 100644
index 000000000..6e51e63c2
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+sudo: false
+
+go:
+  - 1.7
+  - 1.8
+  - 1.9
+  - tip
+
+script:
+  - ./.travis.gogenerate.sh
+  - ./.travis.gofmt.sh
+  - ./.travis.govet.sh
+  - go test -v -race $(go list ./... | grep -v vendor)
diff --git a/vendor/github.com/stretchr/testify/Gopkg.lock b/vendor/github.com/stretchr/testify/Gopkg.lock
new file mode 100644
index 000000000..294cda093
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/Gopkg.lock
@@ -0,0 +1,27 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/stretchr/objx"
+  packages = ["."]
+  revision = "facf9a85c22f48d2f52f2380e4efce1768749a89"
+  version = "v0.1"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "448ddae4702c6aded2555faafd390c537789bb1c483f70b0431e6634f73f2090"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/stretchr/testify/Gopkg.toml b/vendor/github.com/stretchr/testify/Gopkg.toml
new file mode 100644
index 000000000..a16374c8b
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/Gopkg.toml
@@ -0,0 +1,16 @@
+[prune]
+  unused-packages = true
+  non-go = true
+  go-tests = true
+
+[[constraint]]
+  name = "github.com/davecgh/go-spew"
+  version = "~1.1.0"
+
+[[constraint]]
+  name = "github.com/pmezard/go-difflib"
+  version = "~1.0.0"
+
+[[constraint]]
+  name = "github.com/stretchr/objx"
+  version = "~0.1.0"
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
new file mode 100644
index 000000000..473b670a7
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
+
+Please consider promoting this project if you find it useful.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
+OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/stretchr/testify/README.md b/vendor/github.com/stretchr/testify/README.md
new file mode 100644
index 000000000..d3b942b7f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/README.md
@@ -0,0 +1,301 @@
+Testify - Thou Shalt Write Tests
+================================
+
+[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) [![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/testify)](https://goreportcard.com/report/github.com/stretchr/testify) [![GoDoc](https://godoc.org/github.com/stretchr/testify?status.svg)](https://godoc.org/github.com/stretchr/testify)
+
+A set of Go (golang) packages that provide many tools for testifying that your code will behave as you intend.
+
+Features include:
+
+ * [Easy assertions](#assert-package)
+ * [Mocking](#mock-package)
+ * [Testing suite interfaces and functions](#suite-package)
+
+Get started:
+
+ * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date)
+ * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing
+ * Check out the API documentation at http://godoc.org/github.com/stretchr/testify
+ * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc)
+ * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development)
+
+
+
+[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package
+-------------------------------------------------------------------------------------------
+
+The `assert` package provides some helpful methods that allow you to write better test code in Go.
+
+ * Prints friendly, easy-to-read failure descriptions
+ * Allows for very readable code
+ * Optionally annotate each assertion with a message
+
+See it in action:
+
+```go
+package yours
+
+import (
+  "testing"
+  "github.com/stretchr/testify/assert"
+)
+
+func TestSomething(t *testing.T) {
+
+  // assert equality
+  assert.Equal(t, 123, 123, "they should be equal")
+
+  // assert inequality
+  assert.NotEqual(t, 123, 456, "they should not be equal")
+
+  // assert for nil (good for errors)
+  assert.Nil(t, object)
+
+  // assert for not nil (good when you expect something)
+  if assert.NotNil(t, object) {
+
+    // now we know that object isn't nil, we are safe to make
+    // further assertions without causing any errors
+    assert.Equal(t, "Something", object.Value)
+
+  }
+
+}
+```
+
+ * Every assert func takes the `testing.T` object as the first argument. This is how it writes the errors out through the normal `go test` capabilities.
+ * Every assert func returns a bool indicating whether the assertion was successful or not; this is useful if you want to go on making further assertions under certain conditions.
+
+If you assert many times, use the format below:
+
+```go
+package yours
+
+import (
+  "testing"
+  "github.com/stretchr/testify/assert"
+)
+
+func TestSomething(t *testing.T) {
+  assert := assert.New(t)
+
+  // assert equality
+  assert.Equal(123, 123, "they should be equal")
+
+  // assert inequality
+  assert.NotEqual(123, 456, "they should not be equal")
+
+  // assert for nil (good for errors)
+  assert.Nil(object)
+
+  // assert for not nil (good when you expect something)
+  if assert.NotNil(object) {
+
+    // now we know that object isn't nil, we are safe to make
+    // further assertions without causing any errors
+    assert.Equal("Something", object.Value)
+  }
+}
+```
+
+[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package
+---------------------------------------------------------------------------------------------
+
+The `require` package provides the same global functions as the `assert` package, but instead of returning a boolean result they terminate the current test.
+
+See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details.
+
+[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package
+----------------------------------------------------------------------------------------
+
+The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code.
+
+An example test function that tests a piece of code relying on an external object `testObj` can set up expectations (with testify) and assert that they indeed happened:
+
+```go
+package yours
+
+import (
+  "testing"
+  "github.com/stretchr/testify/mock"
+)
+
+/*
+  Test objects
+*/
+
+// MyMockedObject is a mocked object that implements an interface
+// that describes an object that the code I am testing relies on.
+type MyMockedObject struct{
+  mock.Mock
+}
+
+// DoSomething is a method on MyMockedObject that implements some interface
+// and just records the activity, and returns what the Mock object tells it to.
+//
+// In the real object, this method would do something useful, but since this
+// is a mocked object - we're just going to stub it out.
+//
+// NOTE: This method is not being tested here, code that uses this object is.
+func (m *MyMockedObject) DoSomething(number int) (bool, error) {
+
+  args := m.Called(number)
+  return args.Bool(0), args.Error(1)
+
+}
+
+/*
+  Actual test functions
+*/
+
+// TestSomething is an example of how to use our test object to
+// make assertions about some target code we are testing.
+func TestSomething(t *testing.T) {
+
+  // create an instance of our test object
+  testObj := new(MyMockedObject)
+
+  // setup expectations
+  testObj.On("DoSomething", 123).Return(true, nil)
+
+  // call the code we are testing
+  targetFuncThatDoesSomethingWithObj(testObj)
+
+  // assert that the expectations were met
+  testObj.AssertExpectations(t)
+
+}
+```
+
+For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock).
+
+You can also use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface, making the use of mocks much quicker.
+
+[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package
+-----------------------------------------------------------------------------------------
+
+The `suite` package provides functionality that you might be used to from more common object-oriented languages. With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with `go test` as normal.
+
+An example suite is shown below:
+
+```go
+// Basic imports
+import (
+  "testing"
+  "github.com/stretchr/testify/assert"
+  "github.com/stretchr/testify/suite"
+)
+
+// Define the suite, and absorb the built-in basic suite
+// functionality from testify - including a T() method which
+// returns the current testing context
+type ExampleTestSuite struct {
+  suite.Suite
+  VariableThatShouldStartAtFive int
+}
+
+// Make sure that VariableThatShouldStartAtFive is set to five
+// before each test
+func (suite *ExampleTestSuite) SetupTest() {
+  suite.VariableThatShouldStartAtFive = 5
+}
+
+// All methods that begin with "Test" are run as tests within a
+// suite.
+func (suite *ExampleTestSuite) TestExample() {
+  assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
+}
+
+// In order for 'go test' to run this suite, we need to create
+// a normal test function and pass our suite to suite.Run
+func TestExampleTestSuite(t *testing.T) {
+  suite.Run(t, new(ExampleTestSuite))
+}
+```
+
+For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go).
+
+For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite).
+
+The `Suite` object also has assertion methods:
+
+```go
+// Basic imports
+import (
+  "testing"
+  "github.com/stretchr/testify/suite"
+)
+
+// Define the suite, and absorb the built-in basic suite
+// functionality from testify - including assertion methods.
+type ExampleTestSuite struct {
+  suite.Suite
+  VariableThatShouldStartAtFive int
+}
+
+// Make sure that VariableThatShouldStartAtFive is set to five
+// before each test
+func (suite *ExampleTestSuite) SetupTest() {
+  suite.VariableThatShouldStartAtFive = 5
+}
+
+// All methods that begin with "Test" are run as tests within a
+// suite.
+func (suite *ExampleTestSuite) TestExample() {
+  suite.Equal(suite.VariableThatShouldStartAtFive, 5)
+}
+
+// In order for 'go test' to run this suite, we need to create
+// a normal test function and pass our suite to suite.Run
+func TestExampleTestSuite(t *testing.T) {
+  suite.Run(t, new(ExampleTestSuite))
+}
+```
+
+------
+
+Installation
+============
+
+To install Testify, use `go get`:
+
+    go get github.com/stretchr/testify
+
+This will then make the following packages available to you:
+
+    github.com/stretchr/testify/assert
+    github.com/stretchr/testify/mock
+    github.com/stretchr/testify/http
+
+Import the `testify/assert` package into your code using this template:
+
+```go
+package yours
+
+import (
+  "testing"
+  "github.com/stretchr/testify/assert"
+)
+
+func TestSomething(t *testing.T) {
+
+  assert.True(t, true, "True is true!")
+
+}
+```
+
+------
+
+Staying up to date
+==================
+
+To update Testify to the latest version, use `go get -u github.com/stretchr/testify`.
+
+------
+
+Contributing
+============
+
+Please feel free to submit issues, fork the repository and send pull requests!
+
+When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it.
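The `_codegen` program added next is what produces the generated `assertion_format.go` and `assertion_forward.go` files later in this diff. A minimal sketch of how such a generator is typically wired up, assuming `go:generate` directives in the assert package (the directive lines and template file names here are illustrative; the real directives are not part of this diff and may differ):

```go
package assert

// Illustrative go:generate wiring for the _codegen tool, based on its
// -output-package, -template and -out flags as declared in _codegen/main.go.
//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl -out=assertion_format.go
//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -out=assertion_forward.go
```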
diff --git a/vendor/github.com/stretchr/testify/_codegen/main.go b/vendor/github.com/stretchr/testify/_codegen/main.go
new file mode 100644
index 000000000..2e5e8124f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/_codegen/main.go
@@ -0,0 +1,316 @@
+// This program reads all assertion functions from the assert package and
+// automatically generates the corresponding require and forwarded assertions.
+
+package main
+
+import (
+    "bytes"
+    "flag"
+    "fmt"
+    "go/ast"
+    "go/build"
+    "go/doc"
+    "go/format"
+    "go/importer"
+    "go/parser"
+    "go/token"
+    "go/types"
+    "io"
+    "io/ioutil"
+    "log"
+    "os"
+    "path"
+    "regexp"
+    "strings"
+    "text/template"
+
+    "github.com/ernesto-jimenez/gogen/imports"
+)
+
+var (
+    pkg       = flag.String("assert-path", "github.com/stretchr/testify/assert", "Path to the assert package")
+    includeF  = flag.Bool("include-format-funcs", false, "include format functions such as Errorf and Equalf")
+    outputPkg = flag.String("output-package", "", "package for the resulting code")
+    tmplFile  = flag.String("template", "", "What file to load the function template from")
+    out       = flag.String("out", "", "What file to write the source code to")
+)
+
+func main() {
+    flag.Parse()
+
+    scope, docs, err := parsePackageSource(*pkg)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    importer, funcs, err := analyzeCode(scope, docs)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    if err := generateCode(importer, funcs); err != nil {
+        log.Fatal(err)
+    }
+}
+
+func generateCode(importer imports.Importer, funcs []testFunc) error {
+    buff := bytes.NewBuffer(nil)
+
+    tmplHead, tmplFunc, err := parseTemplates()
+    if err != nil {
+        return err
+    }
+
+    // Generate header
+    if err := tmplHead.Execute(buff, struct {
+        Name    string
+        Imports map[string]string
+    }{
+        *outputPkg,
+        importer.Imports(),
+    }); err != nil {
+        return err
+    }
+
+    // Generate funcs
+    for _, fn := range funcs {
+        buff.Write([]byte("\n\n"))
+        if err := tmplFunc.Execute(buff, &fn); err != nil {
+            return err
+        }
+    }
+
+    code, err := format.Source(buff.Bytes())
+    if err != nil {
+        return err
+    }
+
+    // Write file
+    output, err := outputFile()
+    if err != nil {
+        return err
+    }
+    defer output.Close()
+    _, err = io.Copy(output, bytes.NewReader(code))
+    return err
+}
+
+func parseTemplates() (*template.Template, *template.Template, error) {
+    tmplHead, err := template.New("header").Parse(headerTemplate)
+    if err != nil {
+        return nil, nil, err
+    }
+    if *tmplFile != "" {
+        f, err := ioutil.ReadFile(*tmplFile)
+        if err != nil {
+            return nil, nil, err
+        }
+        funcTemplate = string(f)
+    }
+    tmpl, err := template.New("function").Parse(funcTemplate)
+    if err != nil {
+        return nil, nil, err
+    }
+    return tmplHead, tmpl, nil
+}
+
+func outputFile() (*os.File, error) {
+    filename := *out
+    if filename == "-" || (filename == "" && *tmplFile == "") {
+        return os.Stdout, nil
+    }
+    if filename == "" {
+        filename = strings.TrimSuffix(strings.TrimSuffix(*tmplFile, ".tmpl"), ".go") + ".go"
+    }
+    return os.Create(filename)
+}
+
+// analyzeCode takes the types scope and the docs and returns the import
+// information and information about all the assertion functions.
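+// It keeps only the top-level functions whose first parameter implements
+// TestingT and that take at least two parameters, skipping the "...f"
+// variants unless -include-format-funcs is set.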
+func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []testFunc, error) { + testingT := scope.Lookup("TestingT").Type().Underlying().(*types.Interface) + + importer := imports.New(*outputPkg) + var funcs []testFunc + // Go through all the top level functions + for _, fdocs := range docs.Funcs { + // Find the function + obj := scope.Lookup(fdocs.Name) + + fn, ok := obj.(*types.Func) + if !ok { + continue + } + // Check function signature has at least two arguments + sig := fn.Type().(*types.Signature) + if sig.Params().Len() < 2 { + continue + } + // Check first argument is of type testingT + first, ok := sig.Params().At(0).Type().(*types.Named) + if !ok { + continue + } + firstType, ok := first.Underlying().(*types.Interface) + if !ok { + continue + } + if !types.Implements(firstType, testingT) { + continue + } + + // Skip functions ending with f + if strings.HasSuffix(fdocs.Name, "f") && !*includeF { + continue + } + + funcs = append(funcs, testFunc{*outputPkg, fdocs, fn}) + importer.AddImportsFrom(sig.Params()) + } + return importer, funcs, nil +} + +// parsePackageSource returns the types scope and the package documentation from the package +func parsePackageSource(pkg string) (*types.Scope, *doc.Package, error) { + pd, err := build.Import(pkg, ".", 0) + if err != nil { + return nil, nil, err + } + + fset := token.NewFileSet() + files := make(map[string]*ast.File) + fileList := make([]*ast.File, len(pd.GoFiles)) + for i, fname := range pd.GoFiles { + src, err := ioutil.ReadFile(path.Join(pd.SrcRoot, pd.ImportPath, fname)) + if err != nil { + return nil, nil, err + } + f, err := parser.ParseFile(fset, fname, src, parser.ParseComments|parser.AllErrors) + if err != nil { + return nil, nil, err + } + files[fname] = f + fileList[i] = f + } + + cfg := types.Config{ + Importer: importer.Default(), + } + info := types.Info{ + Defs: make(map[*ast.Ident]types.Object), + } + tp, err := cfg.Check(pkg, fset, fileList, &info) + if err != nil { + return nil, nil, err + } + + scope := tp.Scope() + + ap, _ := ast.NewPackage(fset, files, nil, nil) + docs := doc.New(ap, pkg, 0) + + return scope, docs, nil +} + +type testFunc struct { + CurrentPkg string + DocInfo *doc.Func + TypeInfo *types.Func +} + +func (f *testFunc) Qualifier(p *types.Package) string { + if p == nil || p.Name() == f.CurrentPkg { + return "" + } + return p.Name() +} + +func (f *testFunc) Params() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s %s", comma, param.Name(), types.TypeString(param.Type(), f.Qualifier)) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s ...%s", comma, param.Name(), types.TypeString(param.Type().(*types.Slice).Elem(), f.Qualifier)) + } + return p +} + +func (f *testFunc) ForwardedParams() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s", comma, param.Name()) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s...", comma, param.Name()) + } + return p +} + +func (f *testFunc) ParamsFormat() string { + return strings.Replace(f.Params(), "msgAndArgs", "msg string, args", 1) +} + +func (f 
*testFunc) ForwardedParamsFormat() string { + return strings.Replace(f.ForwardedParams(), "msgAndArgs", "append([]interface{}{msg}, args...)", 1) +} + +func (f *testFunc) Comment() string { + return "// " + strings.Replace(strings.TrimSpace(f.DocInfo.Doc), "\n", "\n// ", -1) +} + +func (f *testFunc) CommentFormat() string { + search := fmt.Sprintf("%s", f.DocInfo.Name) + replace := fmt.Sprintf("%sf", f.DocInfo.Name) + comment := strings.Replace(f.Comment(), search, replace, -1) + exp := regexp.MustCompile(replace + `\(((\(\)|[^)])+)\)`) + return exp.ReplaceAllString(comment, replace+`($1, "error message %s", "formatted")`) +} + +func (f *testFunc) CommentWithoutT(receiver string) string { + search := fmt.Sprintf("assert.%s(t, ", f.DocInfo.Name) + replace := fmt.Sprintf("%s.%s(", receiver, f.DocInfo.Name) + return strings.Replace(f.Comment(), search, replace, -1) +} + +var headerTemplate = `/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package {{.Name}} + +import ( +{{range $path, $name := .Imports}} + {{$name}} "{{$path}}"{{end}} +) +` + +var funcTemplate = `{{.Comment}} +func (fwd *AssertionsForwarder) {{.DocInfo.Name}}({{.Params}}) bool { + return assert.{{.DocInfo.Name}}({{.ForwardedParams}}) +}` diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 000000000..ae06a54e2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,349 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return DirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return FileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. 
+// +// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. 
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + return Len(t, object, length, append([]interface{}{msg}, args...)...) +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return Nil(t, object, append([]interface{}{msg}, args...)...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + return NoError(t, err, append([]interface{}{msg}, args...)...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return NotEmpty(t, object, append([]interface{}{msg}, args...)...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. 
+// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return Zero(t, i, append([]interface{}{msg}, args...)...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl new file mode 100644 index 000000000..c5cc66f43 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentFormat}} +func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { + return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 000000000..ffa5428f3 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,686 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + return Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) 
or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + return DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + return Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. 
not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+	return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+//   assert.Equal(t, expectedError, err)
+// }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+	return Error(a.t, err, msgAndArgs...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+//   assert.Equal(t, expectedError, err)
+// }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+	return Errorf(a.t, err, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure through the underlying TestingT.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+	return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+	return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+	return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the underlying TestingT.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+	return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+	return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+	return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+	return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+	return FileExistsf(a.t, path, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
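+// In this package an "error" status is any response code >= 400
+// (http.StatusBadRequest).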
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
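+// The bound is inclusive: the assertion passes when |expected - actual| <= delta.
+// Both arguments may be of any numeric type (or time.Duration); they are
+// converted to float64 before comparing.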
+//
+// a.InDelta(math.Pi, (22 / 7.0), 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon.
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon.
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
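+// Both strings are unmarshalled and the resulting values compared, so key
+// order and insignificant whitespace do not affect the result.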
+// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + return JSONEqf(a.t, expected, actual, msg, args...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3) +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + return Lenf(a.t, object, length, msg, args...) +} + +// Nil asserts that the specified object is nil. +// +// a.Nil(err) +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + return Nilf(a.t, object, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + return NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + return NoErrorf(a.t, err, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+//
+// if a.NotEmpty(obj) {
+//   assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+	return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+//   assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+	return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+	return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+	return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+	return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+	return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+	return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	return Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...)
contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + return Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + return Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + return Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 000000000..99f9acfbb --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 000000000..47bda7786 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1256 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. 
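+//
+// Illustrative examples (not from the upstream documentation):
+//
+//   ObjectsAreEqual([]byte("abc"), []byte("abc")) // true: []byte values are compared with bytes.Equal
+//   ObjectsAreEqual(int32(1), int64(1))           // false: reflect.DeepEqual requires identical types
+//   ObjectsAreEqual(nil, nil)                     // true: both nil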
+func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + if exp, ok := expected.([]byte); ok { + act, ok := actual.([]byte) + if !ok { + return false + } else if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) + } + return reflect.DeepEqual(expected, actual) + +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. +func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. +func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + if len(parts) > 1 { + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +// getWhitespaceString returns a string that is long enough to overwrite the default +// output from the go testing framework. 
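+// The padding length matches the "file:line: " prefix that the testing
+// package prints, so a leading carriage return plus this string visually
+// erases that prefix.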
+func getWhitespaceString() string { + + _, file, line, ok := runtime.Caller(1) + if !ok { + return "" + } + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// Aligns the provided message so that all lines after the first line start at the same location as the first line. +// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). +// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// basis on which the alignment occurs). +func indentMessageLines(message string, longestLabelLen int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + // no need to align first line because it starts at the correct location (after the label) + if i != 0 { + // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab + outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + content := []labeledContent{ + {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")}, + {"Error", failureMessage}, + } + + // Add test name if the Go version supports it + if n, ok := t.(interface { + Name() string + }); ok { + content = append(content, labeledContent{"Test", n.Name()}) + } + + message := messageFromMsgAndArgs(msgAndArgs...) + if len(message) > 0 { + content = append(content, labeledContent{"Messages", message}) + } + + t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...)) + + return false +} + +type labeledContent struct { + label string + content string +} + +// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: +// +// \r\t{{label}}:{{align_spaces}}\t{{content}}\n +// +// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. +// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +// alignment is achieved, "\t{{content}}\n" is added for the output. +// +// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. 
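+//
+// For example (illustrative): labeledOutput(labeledContent{"Error", "boom"})
+// returns "\r\tError:\tboom\n".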
+func labeledOutput(content ...labeledContent) string {
+	longestLabel := 0
+	for _, v := range content {
+		if len(v.label) > longestLabel {
+			longestLabel = len(v.label)
+		}
+	}
+	var output string
+	for _, v := range content {
+		output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+	}
+	return output
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+	if object == nil {
+		return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+	}
+	if !reflect.TypeOf(object).Implements(interfaceType) {
+		return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+	}
+
+	return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if err := validateEqualArgs(expected, actual); err != nil {
+		return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+			expected, actual, err), msgAndArgs...)
+	}
+
+	if !ObjectsAreEqual(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: \n"+
+			"expected: %s\n"+
+			"actual : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parenthesis similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+		return fmt.Sprintf("%T(%#v)", expected, expected),
+			fmt.Sprintf("%T(%#v)", actual, actual)
+	}
+
+	return fmt.Sprintf("%#v", expected),
+		fmt.Sprintf("%#v", actual)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: \n"+
+			"expected: %s\n"+
+			"actual : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
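+// Unlike Equal, Exactly fails when the arguments' types differ, even if their
+// values would be equal after conversion.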
+// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err) +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err) +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +// isEmpty gets whether the specified object is considered empty or not. +func isEmpty(object interface{}) bool { + + // get nil case out of the way + if object == nil { + return true + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty + case reflect.Ptr: + if objValue.IsNil() { + return true + } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) 
+	}
+
+	if l != length {
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+	}
+	return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != true {
+		return Fail(t, "Should be true", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+	if value != false {
+		return Fail(t, "Should be false", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if err := validateEqualArgs(expected, actual); err != nil {
+		return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+			expected, actual, err), msgAndArgs...)
+	}
+
+	if ObjectsAreEqual(expected, actual) {
+		return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// includeElement loops over the list (string, array, slice, map...) and checks
+// whether it contains the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+	listValue := reflect.ValueOf(list)
+	elementValue := reflect.ValueOf(element)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+			found = false
+		}
+	}()
+
+	if reflect.TypeOf(list).Kind() == reflect.String {
+		return true, strings.Contains(listValue.String(), elementValue.String())
+	}
+
+	if reflect.TypeOf(list).Kind() == reflect.Map {
+		mapKeys := listValue.MapKeys()
+		for i := 0; i < len(mapKeys); i++ {
+			if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+				return true, true
+			}
+		}
+		return true, false
+	}
+
+	for i := 0; i < listValue.Len(); i++ {
+		if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+			return true, true
+		}
+	}
+	return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if !found {
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return true // we consider nil to be equal to the nil set + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + } + } + + return true +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if isEmpty(listA) && isEmpty(listB) { + return true + } + + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() + + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) 
+ } + + if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) + } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + funcDidPanic, panicValue := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. 
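+// The check is symmetric and inclusive: it passes when
+// |expected.Sub(actual)| <= delta.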
+// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + case time.Duration: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) + if !result { + return result + } + } + + return true +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) + } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) + } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) 
+ } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) + } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if !Error(t, theError, msgAndArgs...) { + return false + } + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "actual : %q", expected, actual), msgAndArgs...) + } + return true +} + +// matchRegexp return true if a specified regexp matches a string. 
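+// rx may be a *regexp.Regexp or any value whose fmt.Sprint form is a valid
+// pattern; in the latter case it is compiled with regexp.MustCompile, which
+// panics on an invalid pattern.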
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+	var r *regexp.Regexp
+	if rr, ok := rx.(*regexp.Regexp); ok {
+		r = rr
+	} else {
+		r = regexp.MustCompile(fmt.Sprint(rx))
+	}
+
+	return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//   assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+//   assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+
+	match := matchRegexp(rx, str)
+
+	if !match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+//   assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+//   assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	match := matchRegexp(rx, str)
+
+	if match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return !match
+
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+	}
+	return true
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find directory %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if !info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+	}
+	return true
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+//   assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+	var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+	if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} + +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. +func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions_test.go b/vendor/github.com/stretchr/testify/assert/assertions_test.go new file mode 100644 index 000000000..6757bd138 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions_test.go @@ -0,0 +1,1581 @@ +package assert + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + "time" +) + +var ( + i interface{} + zeros = []interface{}{ + false, + byte(0), + complex64(0), + complex128(0), + float32(0), + float64(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + rune(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + uintptr(0), + "", + [0]interface{}{}, + []interface{}(nil), + struct{ x int }{}, + (*interface{})(nil), + (func())(nil), + nil, + interface{}(nil), + map[interface{}]interface{}(nil), + (chan interface{})(nil), + (<-chan interface{})(nil), + (chan<- interface{})(nil), + } + nonZeros = []interface{}{ + true, + byte(1), + complex64(1), + complex128(1), + float32(1), + float64(1), + int(1), + int8(1), + int16(1), + int32(1), + int64(1), + rune(1), + uint(1), + uint8(1), + uint16(1), + uint32(1), + uint64(1), + uintptr(1), + "s", + [1]interface{}{1}, + []interface{}{}, + struct{ x int }{1}, + (*interface{})(&i), + (func())(func() {}), + interface{}(1), + map[interface{}]interface{}{}, + (chan interface{})(make(chan interface{})), + (<-chan interface{})(make(chan interface{})), + (chan<- interface{})(make(chan interface{})), + } +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + 
TestMethod()
+}
+
+// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface
+type AssertionTesterConformingObject struct {
+}
+
+func (a *AssertionTesterConformingObject) TestMethod() {
+}
+
+// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface
+type AssertionTesterNonConformingObject struct {
+}
+
+func TestObjectsAreEqual(t *testing.T) {
+
+	if !ObjectsAreEqual("Hello World", "Hello World") {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual(123, 123) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual(123.5, 123.5) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if !ObjectsAreEqual(nil, nil) {
+		t.Error("objectsAreEqual should return true")
+	}
+	if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual('x', "x") {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual("x", 'x') {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual(0, 0.1) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual(0.1, 0) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if ObjectsAreEqual(uint32(10), int32(10)) {
+		t.Error("objectsAreEqual should return false")
+	}
+	if !ObjectsAreEqualValues(uint32(10), int32(10)) {
+		t.Error("ObjectsAreEqualValues should return true")
+	}
+	if ObjectsAreEqualValues(0, nil) {
+		t.Fail()
+	}
+	if ObjectsAreEqualValues(nil, 0) {
+		t.Fail()
+	}
+
+}
+
+func TestImplements(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) {
+		t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface")
+	}
+	if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {
+		t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implement AssertionTesterInterface")
+	}
+	if Implements(mockT, (*AssertionTesterInterface)(nil), nil) {
+		t.Error("Implements method should return false: nil does not implement AssertionTesterInterface")
+	}
+
+}
+
+func TestIsType(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
+		t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject")
+	}
+	if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) {
+		t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject")
+	}
+
+}
+
+func TestEqual(t *testing.T) {
+
+	mockT := new(testing.T)
+
+	if !Equal(mockT, "Hello World", "Hello World") {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, 123, 123) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, 123.5, 123.5) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, nil, nil) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT, int32(123), int32(123)) {
+		t.Error("Equal should return true")
+	}
+	if !Equal(mockT,
uint64(123), uint64(123)) { + t.Error("Equal should return true") + } + if !Equal(mockT, &struct{}{}, &struct{}{}) { + t.Error("Equal should return true (pointer equality is based on equality of underlying value)") + } + var m map[string]interface{} + if Equal(mockT, m["bar"], "something") { + t.Error("Equal should return false") + } +} + +// bufferT implements TestingT. Its implementation of Errorf writes the output that would be produced by +// testing.T.Errorf to an internal bytes.Buffer. +type bufferT struct { + buf bytes.Buffer +} + +func (t *bufferT) Errorf(format string, args ...interface{}) { + // implementation of decorate is copied from testing.T + decorate := func(s string) string { + _, file, line, ok := runtime.Caller(3) // decorate + log + public function. + if ok { + // Truncate file name at last file name separator. + if index := strings.LastIndex(file, "/"); index >= 0 { + file = file[index+1:] + } else if index = strings.LastIndex(file, "\\"); index >= 0 { + file = file[index+1:] + } + } else { + file = "???" + line = 1 + } + buf := new(bytes.Buffer) + // Every line is indented at least one tab. + buf.WriteByte('\t') + fmt.Fprintf(buf, "%s:%d: ", file, line) + lines := strings.Split(s, "\n") + if l := len(lines); l > 1 && lines[l-1] == "" { + lines = lines[:l-1] + } + for i, line := range lines { + if i > 0 { + // Second and subsequent lines are indented an extra tab. + buf.WriteString("\n\t\t") + } + buf.WriteString(line) + } + buf.WriteByte('\n') + return buf.String() + } + t.buf.WriteString(decorate(fmt.Sprintf(format, args...))) +} + +func TestEqualFormatting(t *testing.T) { + for i, currCase := range []struct { + equalWant string + equalGot string + msgAndArgs []interface{} + want string + }{ + {equalWant: "want", equalGot: "got", want: "\tassertions.go:[0-9]+: \r \r\tError Trace:\t\n\t\t\r\tError: \tNot equal: \n\t\t\r\t \texpected: \"want\"\n\t\t\r\t \tactual : \"got\"\n"}, + {equalWant: "want", equalGot: "got", msgAndArgs: []interface{}{"hello, %v!", "world"}, want: "\tassertions.go:[0-9]+: \r \r\tError Trace:\t\n\t\t\r\tError: \tNot equal: \n\t\t\r\t \texpected: \"want\"\n\t\t\r\t \tactual : \"got\"\n\t\t\r\tMessages: \thello, world!\n"}, + } { + mockT := &bufferT{} + Equal(mockT, currCase.equalWant, currCase.equalGot, currCase.msgAndArgs...) 
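+		// bufferT.Errorf captured the formatted failure output above; check
+		// that it matches the expected pattern for this case.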
+ Regexp(t, regexp.MustCompile(currCase.want), mockT.buf.String(), "Case %d", i) + } +} + +func TestFormatUnequalValues(t *testing.T) { + expected, actual := formatUnequalValues("foo", "bar") + Equal(t, `"foo"`, expected, "value should not include type") + Equal(t, `"bar"`, actual, "value should not include type") + + expected, actual = formatUnequalValues(123, 123) + Equal(t, `123`, expected, "value should not include type") + Equal(t, `123`, actual, "value should not include type") + + expected, actual = formatUnequalValues(int64(123), int32(123)) + Equal(t, `int64(123)`, expected, "value should include type") + Equal(t, `int32(123)`, actual, "value should include type") + + expected, actual = formatUnequalValues(int64(123), nil) + Equal(t, `int64(123)`, expected, "value should include type") + Equal(t, `()`, actual, "value should include type") + + type testStructType struct { + Val string + } + + expected, actual = formatUnequalValues(&testStructType{Val: "test"}, &testStructType{Val: "test"}) + Equal(t, `&assert.testStructType{Val:"test"}`, expected, "value should not include type annotation") + Equal(t, `&assert.testStructType{Val:"test"}`, actual, "value should not include type annotation") +} + +func TestNotNil(t *testing.T) { + + mockT := new(testing.T) + + if !NotNil(mockT, new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if NotNil(mockT, nil) { + t.Error("NotNil should return false: object is nil") + } + if NotNil(mockT, (*struct{})(nil)) { + t.Error("NotNil should return false: object is (*struct{})(nil)") + } + +} + +func TestNil(t *testing.T) { + + mockT := new(testing.T) + + if !Nil(mockT, nil) { + t.Error("Nil should return true: object is nil") + } + if !Nil(mockT, (*struct{})(nil)) { + t.Error("Nil should return true: object is (*struct{})(nil)") + } + if Nil(mockT, new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrue(t *testing.T) { + + mockT := new(testing.T) + + if !True(mockT, true) { + t.Error("True should return true") + } + if True(mockT, false) { + t.Error("True should return false") + } + +} + +func TestFalse(t *testing.T) { + + mockT := new(testing.T) + + if !False(mockT, false) { + t.Error("False should return true") + } + if False(mockT, true) { + t.Error("False should return false") + } + +} + +func TestExactly(t *testing.T) { + + mockT := new(testing.T) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if Exactly(mockT, a, b) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, d) { + t.Error("Exactly should return false") + } + if !Exactly(mockT, a, c) { + t.Error("Exactly should return true") + } + + if Exactly(mockT, nil, a) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqual(t *testing.T) { + + mockT := new(testing.T) + + if !NotEqual(mockT, "Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123, 1234) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } + funcA := func() int { return 23 } + funcB := func() int { return 42 } + if NotEqual(mockT, 
funcA, funcB) {
+		t.Error("NotEqual should return false")
+	}
+
+	if NotEqual(mockT, "Hello World", "Hello World") {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, 123, 123) {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, 123.5, 123.5) {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
+		t.Error("NotEqual should return false")
+	}
+	if NotEqual(mockT, &struct{}{}, &struct{}{}) {
+		t.Error("NotEqual should return false")
+	}
+}
+
+type A struct {
+	Name, Value string
+}
+
+func TestContains(t *testing.T) {
+
+	mockT := new(testing.T)
+	list := []string{"Foo", "Bar"}
+	complexList := []*A{
+		{"b", "c"},
+		{"d", "e"},
+		{"g", "h"},
+		{"j", "k"},
+	}
+	simpleMap := map[interface{}]interface{}{"Foo": "Bar"}
+
+	if !Contains(mockT, "Hello World", "Hello") {
+		t.Error("Contains should return true: \"Hello World\" contains \"Hello\"")
+	}
+	if Contains(mockT, "Hello World", "Salut") {
+		t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"")
+	}
+
+	if !Contains(mockT, list, "Bar") {
+		t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"")
+	}
+	if Contains(mockT, list, "Salut") {
+		t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"")
+	}
+	if !Contains(mockT, complexList, &A{"g", "h"}) {
+		t.Error("Contains should return true: complexList contains {\"g\", \"h\"}")
+	}
+	if Contains(mockT, complexList, &A{"g", "e"}) {
+		t.Error("Contains should return false: complexList does not contain {\"g\", \"e\"}")
+	}
+	if !Contains(mockT, simpleMap, "Foo") {
+		t.Error("Contains should return true: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"")
+	}
+	if Contains(mockT, simpleMap, "Bar") {
+		t.Error("Contains should return false: \"{\"Foo\": \"Bar\"}\" does not contain \"Bar\"")
+	}
+}
+
+func TestNotContains(t *testing.T) {
+
+	mockT := new(testing.T)
+	list := []string{"Foo", "Bar"}
+	simpleMap := map[interface{}]interface{}{"Foo": "Bar"}
+
+	if !NotContains(mockT, "Hello World", "Hello!") {
+		t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"")
+	}
+	if NotContains(mockT, "Hello World", "Hello") {
+		t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"")
+	}
+
+	if !NotContains(mockT, list, "Foo!") {
+		t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"")
+	}
+	if NotContains(mockT, list, "Foo") {
+		t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"")
+	}
+	if NotContains(mockT, simpleMap, "Foo") {
+		t.Error("NotContains should return false: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"")
+	}
+	if !NotContains(mockT, simpleMap, "Bar") {
+		t.Error("NotContains should return true: \"{\"Foo\": \"Bar\"}\" does not contain \"Bar\"")
+	}
+}
+
+func TestSubset(t *testing.T) {
+	mockT := new(testing.T)
+
+	if !Subset(mockT, []int{1, 2, 3}, nil) {
+		t.Error("Subset should return true: given subset is nil")
+	}
+	if !Subset(mockT, []int{1, 2, 3}, []int{}) {
+		t.Error("Subset should return true: any set contains the nil set")
+	}
+	if !Subset(mockT, []int{1, 2, 3}, []int{1, 2}) {
+		t.Error("Subset should return true: [1, 2, 3] contains [1, 2]")
+	}
+ if !Subset(mockT, []int{1, 2, 3}, []int{1, 2, 3}) { + t.Error("Subset should return true: [1, 2, 3] contains [1, 2, 3]") + } + if !Subset(mockT, []string{"hello", "world"}, []string{"hello"}) { + t.Error("Subset should return true: [\"hello\", \"world\"] contains [\"hello\"]") + } + + if Subset(mockT, []string{"hello", "world"}, []string{"hello", "testify"}) { + t.Error("Subset should return false: [\"hello\", \"world\"] does not contain [\"hello\", \"testify\"]") + } + if Subset(mockT, []int{1, 2, 3}, []int{4, 5}) { + t.Error("Subset should return false: [1, 2, 3] does not contain [4, 5]") + } + if Subset(mockT, []int{1, 2, 3}, []int{1, 5}) { + t.Error("Subset should return false: [1, 2, 3] does not contain [1, 5]") + } +} + +func TestNotSubset(t *testing.T) { + mockT := new(testing.T) + + if NotSubset(mockT, []int{1, 2, 3}, nil) { + t.Error("NotSubset should return false: given subset is nil") + } + if NotSubset(mockT, []int{1, 2, 3}, []int{}) { + t.Error("NotSubset should return false: any set contains the nil set") + } + if NotSubset(mockT, []int{1, 2, 3}, []int{1, 2}) { + t.Error("NotSubset should return false: [1, 2, 3] contains [1, 2]") + } + if NotSubset(mockT, []int{1, 2, 3}, []int{1, 2, 3}) { + t.Error("NotSubset should return false: [1, 2, 3] contains [1, 2, 3]") + } + if NotSubset(mockT, []string{"hello", "world"}, []string{"hello"}) { + t.Error("NotSubset should return false: [\"hello\", \"world\"] contains [\"hello\"]") + } + + if !NotSubset(mockT, []string{"hello", "world"}, []string{"hello", "testify"}) { + t.Error("NotSubset should return true: [\"hello\", \"world\"] does not contain [\"hello\", \"testify\"]") + } + if !NotSubset(mockT, []int{1, 2, 3}, []int{4, 5}) { + t.Error("NotSubset should return true: [1, 2, 3] does not contain [4, 5]") + } + if !NotSubset(mockT, []int{1, 2, 3}, []int{1, 5}) { + t.Error("NotSubset should return true: [1, 2, 3] does not contain [1, 5]") + } +} + +func TestNotSubsetNil(t *testing.T) { + mockT := new(testing.T) + NotSubset(mockT, []string{"foo"}, nil) + if !mockT.Failed() { + t.Error("NotSubset on nil set should have failed the test") + } +} + +func Test_includeElement(t *testing.T) { + + list1 := []string{"Foo", "Bar"} + list2 := []int{1, 2} + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + ok, found := includeElement("Hello World", "World") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Bar") + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 1) + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 2) + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo!") + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, 3) + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, "1") + True(t, ok) + False(t, found) + + ok, found = includeElement(simpleMap, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(simpleMap, "Bar") + True(t, ok) + False(t, found) + + ok, found = includeElement(1433, "1") + False(t, ok) + False(t, found) +} + +func TestElementsMatch(t *testing.T) { + mockT := new(testing.T) + + if !ElementsMatch(mockT, nil, nil) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{}, []int{}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1}, []int{1}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1, 1}, []int{1, 1}) { + 
t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1, 2}, []int{1, 2}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{1, 2}, []int{2, 1}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, [2]int{1, 2}, [2]int{2, 1}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []string{"hello", "world"}, []string{"world", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []string{"hello", "hello"}, []string{"hello", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []string{"hello", "hello", "world"}, []string{"hello", "world", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, [3]string{"hello", "hello", "world"}, [3]string{"hello", "world", "hello"}) { + t.Error("ElementsMatch should return true") + } + if !ElementsMatch(mockT, []int{}, nil) { + t.Error("ElementsMatch should return true") + } + + if ElementsMatch(mockT, []int{1}, []int{1, 1}) { + t.Error("ElementsMatch should return false") + } + if ElementsMatch(mockT, []int{1, 2}, []int{2, 2}) { + t.Error("ElementsMatch should return false") + } + if ElementsMatch(mockT, []string{"hello", "hello"}, []string{"hello"}) { + t.Error("ElementsMatch should return false") + } +} + +func TestCondition(t *testing.T) { + mockT := new(testing.T) + + if !Condition(mockT, func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if Condition(mockT, func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanic(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanics(t *testing.T) { + + mockT := new(testing.T) + + if !Panics(mockT, func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if Panics(mockT, func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestPanicsWithValue(t *testing.T) { + + mockT := new(testing.T) + + if !PanicsWithValue(mockT, "Panic!", func() { + panic("Panic!") + }) { + t.Error("PanicsWithValue should return true") + } + + if PanicsWithValue(mockT, "Panic!", func() { + }) { + t.Error("PanicsWithValue should return false") + } + + if PanicsWithValue(mockT, "at the disco", func() { + panic("Panic!") + }) { + t.Error("PanicsWithValue should return false") + } +} + +func TestNotPanics(t *testing.T) { + + mockT := new(testing.T) + + if !NotPanics(mockT, func() { + }) { + t.Error("NotPanics should return true") + } + + if NotPanics(mockT, func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + True(t, NoError(mockT, err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("some error") + + False(t, NoError(mockT, err), "NoError with error should return False") + + // returning an empty error interface + err = func() error { + var err *customError + if err != nil { + t.Fatal("err should be nil here") + } + return err + }() + + if err == nil { // err is not nil here! 
+ t.Errorf("Error should be nil due to empty interface: %s", err) + } + + False(t, NoError(mockT, err), "NoError should fail with empty error interface") +} + +type customError struct{} + +func (*customError) Error() string { return "fail" } + +func TestError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + False(t, Error(mockT, err), "Error should return False for nil arg") + + // now set an error + err = errors.New("some error") + + True(t, Error(mockT, err), "Error with error should return True") + + // go vet check + True(t, Errorf(mockT, err, "example with %s", "formatted message"), "Errorf with error should rturn True") + + // returning an empty error interface + err = func() error { + var err *customError + if err != nil { + t.Fatal("err should be nil here") + } + return err + }() + + if err == nil { // err is not nil here! + t.Errorf("Error should be nil due to empty interface: %s", err) + } + + True(t, Error(mockT, err), "Error should pass with empty error interface") +} + +func TestEqualError(t *testing.T) { + mockT := new(testing.T) + + // start with a nil error + var err error + False(t, EqualError(mockT, err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + False(t, EqualError(mockT, err, "Not some error"), + "EqualError should return false for different error string") + True(t, EqualError(mockT, err, "some error"), + "EqualError should return true") +} + +func Test_isEmpty(t *testing.T) { + + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, isEmpty("")) + True(t, isEmpty(nil)) + True(t, isEmpty([]string{})) + True(t, isEmpty(0)) + True(t, isEmpty(int32(0))) + True(t, isEmpty(int64(0))) + True(t, isEmpty(false)) + True(t, isEmpty(map[string]string{})) + True(t, isEmpty(new(time.Time))) + True(t, isEmpty(time.Time{})) + True(t, isEmpty(make(chan struct{}))) + False(t, isEmpty("something")) + False(t, isEmpty(errors.New("something"))) + False(t, isEmpty([]string{"something"})) + False(t, isEmpty(1)) + False(t, isEmpty(true)) + False(t, isEmpty(map[string]string{"Hello": "World"})) + False(t, isEmpty(chWithValue)) + +} + +func TestEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + var tiP *time.Time + var tiNP time.Time + var s *string + var f *os.File + sP := &s + x := 1 + xP := &x + + type TString string + type TStruct struct { + x int + s []int + } + + True(t, Empty(mockT, ""), "Empty string is empty") + True(t, Empty(mockT, nil), "Nil is empty") + True(t, Empty(mockT, []string{}), "Empty string array is empty") + True(t, Empty(mockT, 0), "Zero int value is empty") + True(t, Empty(mockT, false), "False value is empty") + True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") + True(t, Empty(mockT, s), "Nil string pointer is empty") + True(t, Empty(mockT, f), "Nil os.File pointer is empty") + True(t, Empty(mockT, tiP), "Nil time.Time pointer is empty") + True(t, Empty(mockT, tiNP), "time.Time is empty") + True(t, Empty(mockT, TStruct{}), "struct with zero values is empty") + True(t, Empty(mockT, TString("")), "empty aliased string is empty") + True(t, Empty(mockT, sP), "ptr to nil value is empty") + + False(t, Empty(mockT, "something"), "Non Empty string is not empty") + False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") + False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") + False(t, 
Empty(mockT, 1), "Non-zero int value is not empty") + False(t, Empty(mockT, true), "True value is not empty") + False(t, Empty(mockT, chWithValue), "Channel with values is not empty") + False(t, Empty(mockT, TStruct{x: 1}), "struct with initialized values is empty") + False(t, Empty(mockT, TString("abc")), "non-empty aliased string is empty") + False(t, Empty(mockT, xP), "ptr to non-nil value is not empty") +} + +func TestNotEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + False(t, NotEmpty(mockT, ""), "Empty string is empty") + False(t, NotEmpty(mockT, nil), "Nil is empty") + False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") + False(t, NotEmpty(mockT, 0), "Zero int value is empty") + False(t, NotEmpty(mockT, false), "False value is empty") + False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") + + True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") + True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") + True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") + True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") + True(t, NotEmpty(mockT, true), "True value is not empty") + True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") +} + +func Test_getLen(t *testing.T) { + falseCases := []interface{}{ + nil, + 0, + true, + false, + 'A', + struct{}{}, + } + for _, v := range falseCases { + ok, l := getLen(v) + False(t, ok, "Expected getLen fail to get length of %#v", v) + Equal(t, 0, l, "getLen should return 0 for %#v", v) + } + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + trueCases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range trueCases { + ok, l := getLen(c.v) + True(t, ok, "Expected getLen success to get length of %#v", c.v) + Equal(t, c.l, l) + } +} + +func TestLen(t *testing.T) { + mockT := new(testing.T) + + False(t, Len(mockT, nil, 0), "nil does not have length") + False(t, Len(mockT, 0, 0), "int does not have length") + False(t, Len(mockT, true, 0), "true does not have length") + False(t, Len(mockT, false, 0), "false does not have length") + False(t, Len(mockT, 'A', 0), "Rune does not have length") + False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } + + cases = []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 4}, + {[...]int{1, 2, 3}, 2}, + {"ABC", 2}, + {map[int]int{1: 2, 2: 4, 3: 6}, 4}, + {ch, 2}, + + {[]int{}, 1}, + {map[int]int{}, 1}, + {make(chan int), 1}, + + {[]int(nil), 1}, + {map[int]int(nil), 1}, + {(chan int)(nil), 1}, + } + + for _, c := range cases { + False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDuration(t *testing.T) { + + mockT := new(testing.T) + a := 
+	b := a.Add(10 * time.Second)
+
+	True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference")
+	True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference")
+
+	False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference")
+	False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference")
+
+	False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a -9s time difference")
+	False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a -9s time difference")
+
+	False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a -11s time difference")
+	False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a -11s time difference")
+}
+
+func TestInDelta(t *testing.T) {
+	mockT := new(testing.T)
+
+	True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01")
+	True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01")
+	True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1")
+	False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail")
+	False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail")
+	False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail")
+	False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail")
+	False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail")
+
+	cases := []struct {
+		a, b  interface{}
+		delta float64
+	}{
+		{uint8(2), uint8(1), 1},
+		{uint16(2), uint16(1), 1},
+		{uint32(2), uint32(1), 1},
+		{uint64(2), uint64(1), 1},
+
+		{int(2), int(1), 1},
+		{int8(2), int8(1), 1},
+		{int16(2), int16(1), 1},
+		{int32(2), int32(1), 1},
+		{int64(2), int64(1), 1},
+
+		{float32(2), float32(1), 1},
+		{float64(2), float64(1), 1},
+	}
+
+	for _, tc := range cases {
+		True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%v - %v| <= %v", tc.a, tc.b, tc.delta)
+	}
+}
+
+func TestInDeltaSlice(t *testing.T) {
+	mockT := new(testing.T)
+
+	True(t, InDeltaSlice(mockT,
+		[]float64{1.001, 0.999},
+		[]float64{1, 1},
+		0.1), "{1.001, 0.999} is element-wise close to {1, 1} in delta=0.1")
+
+	True(t, InDeltaSlice(mockT,
+		[]float64{1, 2},
+		[]float64{0, 3},
+		1), "{1, 2} is element-wise close to {0, 3} in delta=1")
+
+	False(t, InDeltaSlice(mockT,
+		[]float64{1, 2},
+		[]float64{0, 3},
+		0.1), "{1, 2} is not element-wise close to {0, 3} in delta=0.1")
+
+	False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail")
+}
+
+func TestInDeltaMapValues(t *testing.T) {
+	mockT := new(testing.T)
+
+	for _, tc := range []struct {
+		title  string
+		expect interface{}
+		actual interface{}
+		f      func(TestingT, bool, ...interface{}) bool
+		delta  float64
+	}{
+		{
+			title: "Within delta",
+			expect: map[string]float64{
+				"foo": 1.0,
+				"bar": 2.0,
+			},
+			actual: map[string]float64{
+				"foo": 1.01,
+				"bar": 1.99,
+			},
+			delta: 0.1,
+			f:     True,
+		},
+		{
+			title: "Within delta",
+			expect: map[int]float64{
+				1: 1.0,
+				2: 2.0,
+			},
+			actual: map[int]float64{
+				1: 1.0,
+				2: 1.99,
+			},
+			delta: 0.1,
+			f:     True,
+		},
+		{
+			title: "Different number of keys",
+			expect: map[int]float64{
+				1: 1.0,
+				2: 2.0,
+			},
+			actual: map[int]float64{
+				1: 1.0,
+			},
+			delta: 0.1,
+			f:     False,
+		},
+		{
+			title: "Within delta with zero value",
+			expect: map[string]float64{
+				"zero": 0.0,
+			},
+			actual: map[string]float64{
+				"zero": 0.0,
+			},
+			delta: 0.1,
+			f:     True,
+		},
+		{
+			title: "With missing key with zero value",
+			expect: map[string]float64{
+				"zero": 0.0,
+				"foo":  0.0,
+			},
+			actual: map[string]float64{
+				"zero": 0.0,
+				"bar":  0.0,
+			},
+			f: False,
+		},
+	} {
+		tc.f(t, InDeltaMapValues(mockT, tc.expect, tc.actual, tc.delta), tc.title+"\n"+diff(tc.expect, tc.actual))
+	}
+}
+
+func TestInEpsilon(t *testing.T) {
+	mockT := new(testing.T)
+
+	cases := []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), uint16(2), .001},
+		{2.1, 2.2, 0.1},
+		{2.2, 2.1, 0.1},
+		{-2.1, -2.2, 0.1},
+		{-2.2, -2.1, 0.1},
+		{uint64(100), uint8(101), 0.01},
+		{0.1, -0.1, 2},
+		{0.1, 0, 2},
+		{time.Second, time.Second + time.Millisecond, 0.002},
+	}
+
+	for _, tc := range cases {
+		True(t, InEpsilon(t, tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon), "test: %q", tc)
+	}
+
+	cases = []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), int16(-2), .001},
+		{uint64(100), uint8(102), 0.01},
+		{2.1, 2.2, 0.001},
+		{2.2, 2.1, 0.001},
+		{2.1, -2.2, 1},
+		{2.1, "bla-bla", 0},
+		{0.1, -0.1, 1.99},
+		{0, 0.1, 2}, // expected must be different to zero
+		{time.Second, time.Second + 10*time.Millisecond, 0.002},
+	}
+
+	for _, tc := range cases {
+		False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon))
+	}
+
+}
+
+func TestInEpsilonSlice(t *testing.T) {
+	mockT := new(testing.T)
+
+	True(t, InEpsilonSlice(mockT,
+		[]float64{2.2, 2.0},
+		[]float64{2.1, 2.1},
+		0.06), "{2.2, 2.0} is element-wise close to {2.1, 2.1} in epsilon=0.06")
+
+	False(t, InEpsilonSlice(mockT,
+		[]float64{2.2, 2.0},
+		[]float64{2.1, 2.1},
+		0.04), "{2.2, 2.0} is not element-wise close to {2.1, 2.1} in epsilon=0.04")
+
+	False(t, InEpsilonSlice(mockT, "", nil, 1), "Expected non numeral slices to fail")
+}
+
+func TestRegexp(t *testing.T) {
+	mockT := new(testing.T)
+
+	cases := []struct {
+		rx, str string
+	}{
+		{"^start", "start of the line"},
+		{"end$", "in the end"},
+		{"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"},
+	}
+
+	for _, tc := range cases {
+		True(t, Regexp(mockT, tc.rx, tc.str))
+		True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+		False(t, NotRegexp(mockT, tc.rx, tc.str))
+		False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+	}
+
+	cases = []struct {
+		rx, str string
+	}{
+		{"^asdfastart", "Not the start of the line"},
+		{"end$", "in the end."},
+		{"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"},
+	}
+
+	for _, tc := range cases {
+		False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str)
+		False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+		True(t, NotRegexp(mockT, tc.rx, tc.str))
+		True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))
+	}
+}
+
+func testAutogeneratedFunction() {
+	defer func() {
+		if err := recover(); err == nil {
+			panic("did not panic")
+		}
+		CallerInfo()
+	}()
+	t := struct {
+		io.Closer
+	}{}
+	var c io.Closer
+	c = t
+	c.Close()
+}
+
+func TestCallerInfoWithAutogeneratedFunctions(t *testing.T) {
+	NotPanics(t, func() {
+		testAutogeneratedFunction()
+	})
+}
+
+func TestZero(t *testing.T) {
+	mockT := new(testing.T)
+
+	for _, test := range zeros {
+		True(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test)))
+	}
+
+	for _, test := range nonZeros {
+		False(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test)))
+	}
+}
+
+func TestNotZero(t *testing.T) {
+	mockT := new(testing.T)
+
+	for _, test := range zeros {
+		False(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test)))
+	}
+
+	for _, test := range nonZeros {
+		True(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test)))
+	}
+}
+
+func TestFileExists(t *testing.T) {
+	mockT := new(testing.T)
+	True(t, FileExists(mockT, "assertions.go"))
+
+	mockT = new(testing.T)
+	False(t, FileExists(mockT, "random_file"))
+
+	mockT = new(testing.T)
+	False(t, FileExists(mockT, "../_codegen"))
+}
+
+func TestDirExists(t *testing.T) {
+	mockT := new(testing.T)
+	False(t, DirExists(mockT, "assertions.go"))
+
+	mockT = new(testing.T)
+	False(t, DirExists(mockT, "random_dir"))
+
+	mockT = new(testing.T)
+	True(t, DirExists(mockT, "../_codegen"))
+}
+
+func TestJSONEq_EqualJSONString(t *testing.T) {
+	mockT := new(testing.T)
+	True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`))
+}
+
+func TestJSONEq_EquivalentButNotEqual(t *testing.T) {
+	mockT := new(testing.T)
+	True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`))
+}
+
+func TestJSONEq_HashOfArraysAndHashes(t *testing.T) {
+	mockT := new(testing.T)
+	True(t, JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}",
+		"{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}"))
+}
+
+func TestJSONEq_Array(t *testing.T) {
+	mockT := new(testing.T)
+	True(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`))
+}
+
+func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) {
+	mockT := new(testing.T)
+	False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`))
+}
+
+func TestJSONEq_HashesNotEquivalent(t *testing.T) {
+	mockT := new(testing.T)
+	False(t, JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`))
+}
+
+func TestJSONEq_ActualIsNotJSON(t *testing.T) {
+	mockT := new(testing.T)
+	False(t, JSONEq(mockT, `{"foo": "bar"}`, "Not JSON"))
+}
+
+func TestJSONEq_ExpectedIsNotJSON(t *testing.T) {
+	mockT := new(testing.T)
+	False(t, JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`))
+}
+
+func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) {
+	mockT := new(testing.T)
+	False(t, JSONEq(mockT, "Not JSON", "Not JSON"))
+}
+
+func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) {
+	mockT := new(testing.T)
+	False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`))
+}
+
+func TestDiff(t *testing.T) {
+	expected := `
+
+Diff:
+--- Expected
++++ Actual
+@@ -1,3 +1,3 @@
+ (struct { foo string }) {
+- foo: (string) (len=5) "hello"
++ foo: (string) (len=3) "bar"
+ }
+`
+	actual := diff(
+		struct{ foo string }{"hello"},
+		struct{ foo string }{"bar"},
+	)
+	Equal(t, expected, actual)
+
+	expected = `
+
+Diff:
+--- Expected
++++ Actual
+@@ -2,5 +2,5 @@
+ (int) 1,
+- (int) 2,
+ (int) 3,
+- (int) 4
++ (int) 5,
++ (int) 7
+ }
+`
+	actual = diff(
+		[]int{1, 2, 3, 4},
+		[]int{1, 3, 5, 7},
+	)
+	Equal(t, expected, actual)
+
+	expected = `
+
+Diff:
+---
Expected ++++ Actual +@@ -2,4 +2,4 @@ + (int) 1, +- (int) 2, +- (int) 3 ++ (int) 3, ++ (int) 5 + } +` + actual = diff( + []int{1, 2, 3, 4}[0:3], + []int{1, 3, 5, 7}[0:3], + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -1,6 +1,6 @@ + (map[string]int) (len=4) { +- (string) (len=4) "four": (int) 4, ++ (string) (len=4) "five": (int) 5, + (string) (len=3) "one": (int) 1, +- (string) (len=5) "three": (int) 3, +- (string) (len=3) "two": (int) 2 ++ (string) (len=5) "seven": (int) 7, ++ (string) (len=5) "three": (int) 3 + } +` + + actual = diff( + map[string]int{"one": 1, "two": 2, "three": 3, "four": 4}, + map[string]int{"one": 1, "three": 3, "five": 5, "seven": 7}, + ) + Equal(t, expected, actual) +} + +func TestDiffEmptyCases(t *testing.T) { + Equal(t, "", diff(nil, nil)) + Equal(t, "", diff(struct{ foo string }{}, nil)) + Equal(t, "", diff(nil, struct{ foo string }{})) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff([]int{1}, []bool{true})) +} + +// Ensure there are no data races +func TestDiffRace(t *testing.T) { + t.Parallel() + + expected := map[string]string{ + "a": "A", + "b": "B", + "c": "C", + } + + actual := map[string]string{ + "d": "D", + "e": "E", + "f": "F", + } + + // run diffs in parallel simulating tests with t.Parallel() + numRoutines := 10 + rChans := make([]chan string, numRoutines) + for idx := range rChans { + rChans[idx] = make(chan string) + go func(ch chan string) { + defer close(ch) + ch <- diff(expected, actual) + }(rChans[idx]) + } + + for _, ch := range rChans { + for msg := range ch { + NotZero(t, msg) // dummy assert + } + } +} + +type mockTestingT struct { +} + +func (m *mockTestingT) Errorf(format string, args ...interface{}) {} + +func TestFailNowWithPlainTestingT(t *testing.T) { + mockT := &mockTestingT{} + + Panics(t, func() { + FailNow(mockT, "failed") + }, "should panic since mockT is missing FailNow()") +} + +type mockFailNowTestingT struct { +} + +func (m *mockFailNowTestingT) Errorf(format string, args ...interface{}) {} + +func (m *mockFailNowTestingT) FailNow() {} + +func TestFailNowWithFullTestingT(t *testing.T) { + mockT := &mockFailNowTestingT{} + + NotPanics(t, func() { + FailNow(mockT, "failed") + }, "should call mockT.FailNow() rather than panicking") +} + +func TestBytesEqual(t *testing.T) { + var cases = []struct { + a, b []byte + }{ + {make([]byte, 2), make([]byte, 2)}, + {make([]byte, 2), make([]byte, 2, 3)}, + {nil, make([]byte, 0)}, + } + for i, c := range cases { + Equal(t, reflect.DeepEqual(c.a, c.b), ObjectsAreEqual(c.a, c.b), "case %d failed", i+1) + } +} + +func BenchmarkBytesEqual(b *testing.B) { + const size = 1024 * 8 + s := make([]byte, size) + for i := range s { + s[i] = byte(i % 255) + } + s2 := make([]byte, size) + copy(s2, s) + + mockT := &mockFailNowTestingT{} + b.ResetTimer() + for i := 0; i < b.N; i++ { + Equal(mockT, s, s2) + } +} + +func TestEqualArgsValidation(t *testing.T) { + err := validateEqualArgs(time.Now, time.Now) + EqualError(t, err, "cannot take func type as argument") +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 000000000..c9dccc4d6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
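+//
+// Every assertion returns a bool reporting whether it passed, so a result can
+// gate follow-up checks, e.g. if assert.NoError(t, err) { ... }.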
+// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 000000000..ac9dc9d1d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 000000000..9ad56851d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. 
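+//
+// For example:
+//
+//	assert := New(t)
+//	assert.Equal(123, 123)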
+func New(t TestingT) *Assertions {
+	return &Assertions{
+		t: t,
+	}
+}
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go b/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go
new file mode 100644
index 000000000..22e1df1d9
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go
@@ -0,0 +1,611 @@
+package assert
+
+import (
+	"errors"
+	"regexp"
+	"testing"
+	"time"
+)
+
+func TestImplementsWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) {
+		t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface")
+	}
+	if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {
+		t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implement AssertionTesterInterface")
+	}
+}
+
+func TestIsTypeWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
+		t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject")
+	}
+	if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) {
+		t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject")
+	}
+
+}
+
+func TestEqualWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.Equal("Hello World", "Hello World") {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal(123, 123) {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal(123.5, 123.5) {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal([]byte("Hello World"), []byte("Hello World")) {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal(nil, nil) {
+		t.Error("Equal should return true")
+	}
+}
+
+func TestEqualValuesWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.EqualValues(uint32(10), int32(10)) {
+		t.Error("EqualValues should return true")
+	}
+}
+
+func TestNotNilWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.NotNil(new(AssertionTesterConformingObject)) {
+		t.Error("NotNil should return true: object is not nil")
+	}
+	if assert.NotNil(nil) {
+		t.Error("NotNil should return false: object is nil")
+	}
+
+}
+
+func TestNilWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.Nil(nil) {
+		t.Error("Nil should return true: object is nil")
+	}
+	if assert.Nil(new(AssertionTesterConformingObject)) {
+		t.Error("Nil should return false: object is not nil")
+	}
+
+}
+
+func TestTrueWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.True(true) {
+		t.Error("True should return true")
+	}
+	if assert.True(false) {
+		t.Error("True should return false")
+	}
+
+}
+
+func TestFalseWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.False(false) {
+		t.Error("False should return true")
+	}
+	if assert.False(true) {
+		t.Error("False should return false")
+	}
+
+}
+
+func TestExactlyWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	a := float32(1)
+	b := float64(1)
+	c := float32(1)
+	d := float32(2)
+
+	if assert.Exactly(a, b) {
+		t.Error("Exactly should return
false") + } + if assert.Exactly(a, d) { + t.Error("Exactly should return false") + } + if !assert.Exactly(a, c) { + t.Error("Exactly should return true") + } + + if assert.Exactly(nil, a) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqualWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotEqual("Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123, 1234) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.Contains("Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if assert.Contains("Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !assert.Contains(list, "Foo") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if assert.Contains(list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + +} + +func TestNotContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.NotContains("Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if assert.NotContains("Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !assert.NotContains(list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if assert.NotContains(list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func TestConditionWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Condition(func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if assert.Condition(func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanicWrapper(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Panics(func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if assert.Panics(func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotPanics(func() { + }) { + t.Error("NotPanics should return true") + } + + if assert.NotPanics(func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.True(mockAssert.NoError(err), "NoError should return True for 
nil arg") + + // now set an error + err = errors.New("Some error") + + assert.False(mockAssert.NoError(err), "NoError with error should return False") + +} + +func TestErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.False(mockAssert.Error(err), "Error should return False for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.True(mockAssert.Error(err), "Error with error should return True") + +} + +func TestEqualErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + assert.False(mockAssert.EqualError(err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + assert.False(mockAssert.EqualError(err, "Not some error"), + "EqualError should return false for different error string") + assert.True(mockAssert.EqualError(err, "some error"), + "EqualError should return true") +} + +func TestEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.Empty(""), "Empty string is empty") + assert.True(mockAssert.Empty(nil), "Nil is empty") + assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") + assert.True(mockAssert.Empty(0), "Zero int value is empty") + assert.True(mockAssert.Empty(false), "False value is empty") + + assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") + assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") + assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") + assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") + assert.False(mockAssert.Empty(true), "True value is not empty") + +} + +func TestNotEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.NotEmpty(""), "Empty string is empty") + assert.False(mockAssert.NotEmpty(nil), "Nil is empty") + assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") + assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") + assert.False(mockAssert.NotEmpty(false), "False value is empty") + + assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") + assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") + assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") + assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") + assert.True(mockAssert.NotEmpty(true), "True value is not empty") + +} + +func TestLenWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.Len(nil, 0), "nil does not have length") + assert.False(mockAssert.Len(0, 0), "int does not have length") + assert.False(mockAssert.Len(true, 0), "true does not have length") + assert.False(mockAssert.Len(false, 0), "false does not have length") + assert.False(mockAssert.Len('A', 0), "Rune does not have length") + assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + 
{(chan int)(nil), 0}, + } + + for _, c := range cases { + assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDurationWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + a := time.Now() + b := a.Add(10 * time.Second) + + assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDeltaWrapper(t *testing.T) { + assert := New(new(testing.T)) + + True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1") + False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInEpsilonWrapper(t *testing.T) { + assert := New(new(testing.T)) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + {-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + } + + for _, tc := range cases { + True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + } + + for _, tc := range cases { + False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } +} + +func TestRegexpWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, assert.Regexp(tc.rx, tc.str)) + True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + False(t, assert.NotRegexp(tc.rx, 
tc.str)) + False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + True(t, assert.NotRegexp(tc.rx, tc.str)) + True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } +} + +func TestZeroWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + for _, test := range zeros { + assert.True(mockAssert.Zero(test), "Zero should return true for %v", test) + } + + for _, test := range nonZeros { + assert.False(mockAssert.Zero(test), "Zero should return false for %v", test) + } +} + +func TestNotZeroWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + for _, test := range zeros { + assert.False(mockAssert.NotZero(test), "Zero should return true for %v", test) + } + + for _, test := range nonZeros { + assert.True(mockAssert.NotZero(test), "Zero should return false for %v", test) + } +} + +func TestJSONEqWrapper_EqualSONString(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") { + t.Error("JSONEq should return true") + } +} + +func TestJSONEqWrapper_Array(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + assert := 
New(new(testing.T)) + if assert.JSONEq("Not JSON", "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) { + t.Error("JSONEq should return false") + } +} diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 000000000..3101e78dd --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,127 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return -1, err + } + handler(w, req) + return w.Code, nil +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). 
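+//
+// A usage sketch (myHandler is an assumed http.HandlerFunc, not part of this
+// package); any status >= 400 satisfies the assertion, and the returned bool
+// can gate follow-up checks:
+//
+//   if assert.HTTPError(t, myHandler, "GET", "/broken", nil) {
+//       // handler responded with an error status, as expected
+//   }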
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
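+//
+// A sketch with every parameter spelled out (myHandler is an assumed
+// handler), since the assertion takes method, url and values ahead of the
+// body substring:
+//
+//   assert.HTTPBodyNotContains(t, myHandler, "GET", "/search",
+//       url.Values{"q": []string{"hello"}}, "internal error")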
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return !contains +} diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions_test.go b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go new file mode 100644 index 000000000..3ab76830f --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go @@ -0,0 +1,117 @@ +package assert + +import ( + "fmt" + "net/http" + "net/url" + "testing" +) + +func httpOK(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func httpRedirect(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTemporaryRedirect) +} + +func httpError(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) +} + +func TestHTTPSuccess(t *testing.T) { + assert := New(t) + + mockT1 := new(testing.T) + assert.Equal(HTTPSuccess(mockT1, httpOK, "GET", "/", nil), true) + assert.False(mockT1.Failed()) + + mockT2 := new(testing.T) + assert.Equal(HTTPSuccess(mockT2, httpRedirect, "GET", "/", nil), false) + assert.True(mockT2.Failed()) + + mockT3 := new(testing.T) + assert.Equal(HTTPSuccess(mockT3, httpError, "GET", "/", nil), false) + assert.True(mockT3.Failed()) +} + +func TestHTTPRedirect(t *testing.T) { + assert := New(t) + + mockT1 := new(testing.T) + assert.Equal(HTTPRedirect(mockT1, httpOK, "GET", "/", nil), false) + assert.True(mockT1.Failed()) + + mockT2 := new(testing.T) + assert.Equal(HTTPRedirect(mockT2, httpRedirect, "GET", "/", nil), true) + assert.False(mockT2.Failed()) + + mockT3 := new(testing.T) + assert.Equal(HTTPRedirect(mockT3, httpError, "GET", "/", nil), false) + assert.True(mockT3.Failed()) +} + +func TestHTTPError(t *testing.T) { + assert := New(t) + + mockT1 := new(testing.T) + assert.Equal(HTTPError(mockT1, httpOK, "GET", "/", nil), false) + assert.True(mockT1.Failed()) + + mockT2 := new(testing.T) + assert.Equal(HTTPError(mockT2, httpRedirect, "GET", "/", nil), false) + assert.True(mockT2.Failed()) + + mockT3 := new(testing.T) + assert.Equal(HTTPError(mockT3, httpError, "GET", "/", nil), true) + assert.False(mockT3.Failed()) +} + +func TestHTTPStatusesWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) +} + +func httpHelloName(w http.ResponseWriter, r *http.Request) { + name := r.FormValue("name") + w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) +} + +func TestHttpBody(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", 
"/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) +} + +func TestHttpBodyWrappers(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + +} diff --git a/vendor/github.com/stretchr/testify/doc.go b/vendor/github.com/stretchr/testify/doc.go new file mode 100644 index 000000000..377d5cc56 --- /dev/null +++ b/vendor/github.com/stretchr/testify/doc.go @@ -0,0 +1,22 @@ +// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend. +// +// testify contains the following packages: +// +// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system. +// +// The http package contains tools to make it easier to test http activity using the Go testing system. +// +// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected. +// +// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces. +package testify + +// blank imports help docs. +import ( + // assert package + _ "github.com/stretchr/testify/assert" + // http package + _ "github.com/stretchr/testify/http" + // mock package + _ "github.com/stretchr/testify/mock" +) diff --git a/vendor/github.com/stretchr/testify/http/doc.go b/vendor/github.com/stretchr/testify/http/doc.go new file mode 100644 index 000000000..695167c6d --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/doc.go @@ -0,0 +1,2 @@ +// Package http DEPRECATED USE net/http/httptest +package http diff --git a/vendor/github.com/stretchr/testify/http/test_response_writer.go b/vendor/github.com/stretchr/testify/http/test_response_writer.go new file mode 100644 index 000000000..5c3f813fa --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/test_response_writer.go @@ -0,0 +1,49 @@ +package http + +import ( + "net/http" +) + +// TestResponseWriter DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. 
+type TestResponseWriter struct { + + // StatusCode is the last int written by the call to WriteHeader(int) + StatusCode int + + // Output is a string containing the written bytes using the Write([]byte) func. + Output string + + // header is the internal storage of the http.Header object + header http.Header +} + +// Header DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Header() http.Header { + + if rw.header == nil { + rw.header = make(http.Header) + } + + return rw.header +} + +// Write DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Write(bytes []byte) (int, error) { + + // assume 200 success if no header has been set + if rw.StatusCode == 0 { + rw.WriteHeader(200) + } + + // add these bytes to the output string + rw.Output = rw.Output + string(bytes) + + // return normal values + return 0, nil + +} + +// WriteHeader DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) WriteHeader(i int) { + rw.StatusCode = i +} diff --git a/vendor/github.com/stretchr/testify/http/test_round_tripper.go b/vendor/github.com/stretchr/testify/http/test_round_tripper.go new file mode 100644 index 000000000..b1e32f1d8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/test_round_tripper.go @@ -0,0 +1,17 @@ +package http + +import ( + "github.com/stretchr/testify/mock" + "net/http" +) + +// TestRoundTripper DEPRECATED USE net/http/httptest +type TestRoundTripper struct { + mock.Mock +} + +// RoundTrip DEPRECATED USE net/http/httptest +func (t *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + args := t.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 000000000..7324128ef --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. +// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. 
Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 000000000..1e232b56e --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,815 @@ +package mock + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Call to this method can be optional + optional bool + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + waitTime time.Duration + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + RunFn func(Arguments) +} + +func newCall(parent *Mock, methodName string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. +// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. 
+// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() + c.waitTime = d + return c +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method such as unmarshalers that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// Maybe allows the method call to be optional. Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. + ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + m.mutex.Lock() + defer m.mutex.Unlock() + c := newCall(m, methodName, arguments...) 
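+ // register the expectation so Called/MethodCalled can match it later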
+ m.ExpectedCalls = append(m.ExpectedCalls, c) + return c +} + +// /* +// Recording and responding to activity +// */ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + for i, call := range m.ExpectedCalls { + if call.Method == method && call.Repeatability > -1 { + + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + return i, call + } + + } + } + return -1, nil +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) { + diffCount := 0 + var closestCall *Call + + for _, call := range m.expectedCalls() { + if call.Method == method { + + _, tempDiffCount := call.Arguments.Diff(arguments) + if tempDiffCount < diffCount || diffCount == 0 { + diffCount = tempDiffCount + closestCall = call + } + + } + } + + if closestCall == nil { + return false, nil + } + + return true, closestCall +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + + var argValsString string + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + //Next four lines are required to use GCCGO function naming conventions. + //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + //With GCCGO we need to remove interface information starting from pN
<dd>
. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} + +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + found, call := m.findExpectedCall(methodName, arguments...) + + if found < 0 { + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestFound, closestCall := m.findClosestCall(methodName, arguments...) + m.mutex.Unlock() + + if closestFound { + panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(methodName, arguments, true), callString(methodName, closestCall.Arguments, true), diffArguments(closestCall.Arguments, arguments))) + } else { + panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo())) + } + } + + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + + // add the call + m.Calls = append(m.Calls, *newCall(m, methodName, arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } else { + time.Sleep(call.waitTime) + } + + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) + } + + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs +} + +/* + Assertions +*/ + +type assertExpectationser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + for _, obj := range testObjects { + if m, ok := obj.(Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = &m + } + m := obj.(assertExpectationser) + if !m.AssertExpectations(t) { + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. 
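+//
+// Typical use at the end of a test (a sketch; mockedService is an assumed
+// Mock-embedding object):
+//
+//   mockedService.On("DoSomething", 123).Return(nil).Once()
+//   // ... exercise the code under test ...
+//   mockedService.AssertExpectations(t)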
+func (m *Mock) AssertExpectations(t TestingT) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } else { + if expectedCall.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } else { + t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. 
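+//
+// Inside a mocked method, Called returns an Arguments value that is usually
+// unpacked with the typed getters (sketch):
+//
+//   args := m.Called(firstname, lastname, age)
+//   return args.Int(0), args.Error(1)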
+type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything string = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. + fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } + + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MatchedBy() panics. +func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. 
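+//
+// Sketch: Anything matches any value, so only genuinely different positions
+// are counted:
+//
+//   diff, n := Arguments{1, Anything}.Diff([]interface{}{1, "x"})
+//   // n == 0 and diff reads "No differences."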
+func (args Arguments) Diff(objects []interface{}) (string, int) { + + var output = "\n" + var differences int + + var maxArgCount = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + + if len(objects) <= i { + actual = "(Missing)" + } else { + actual = objects[i] + } + + if len(args) <= i { + expected = "(Missing)" + } else { + expected = args[i] + } + + if matcher, ok := expected.(argumentMatcher); ok { + if matcher.Matches(actual) { + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actual, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: PASS: %s not matched by %s\n", output, i, actual, matcher) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) + } + + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actual, expected) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actual, expected) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. 
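+//
+// Sketch:
+//
+//   args := Arguments{42, "hello"}
+//   n := args.Int(0) // n == 42
+//   args.Int(1)      // panics: argument 1 is a string, not an int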
+func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + if len(expected) != len(actual) { + return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) + } + + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} diff --git a/vendor/github.com/stretchr/testify/mock/mock_test.go b/vendor/github.com/stretchr/testify/mock/mock_test.go new file mode 100644 index 000000000..cb245ba59 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock_test.go @@ -0,0 +1,1352 @@ +package mock + +import ( + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +/* + Test objects +*/ + +// ExampleInterface represents an example interface. 
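+//
+// A mock satisfying it embeds Mock and forwards each call (sketch; MyMock is
+// an assumed name):
+//
+//   func (m *MyMock) TheExampleMethod(a, b, c int) (int, error) {
+//       args := m.Called(a, b, c)
+//       return args.Int(0), args.Error(1)
+//   }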
+type ExampleInterface interface { + TheExampleMethod(a, b, c int) (int, error) +} + +// TestExampleImplementation is a test implementation of ExampleInterface +type TestExampleImplementation struct { + Mock +} + +func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) { + args := i.Called(a, b, c) + return args.Int(0), errors.New("Whoops") +} + +func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) { + i.Called(yesorno) +} + +type ExampleType struct { + ran bool +} + +func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error { + args := i.Called(et) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod4(v ExampleInterface) error { + args := i.Called(v) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod5(ch chan struct{}) error { + args := i.Called(ch) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod6(m map[string]bool) error { + args := i.Called(m) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethod7(slice []bool) error { + args := i.Called(slice) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodFunc(fn func(string) error) error { + args := i.Called(fn) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadic(a ...int) error { + args := i.Called(a) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadicInterface(a ...interface{}) error { + args := i.Called(a) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodMixedVariadic(a int, b ...int) error { + args := i.Called(a, b) + return args.Error(0) +} + +type ExampleFuncType func(string) error + +func (i *TestExampleImplementation) TheExampleMethodFuncType(fn ExampleFuncType) error { + args := i.Called(fn) + return args.Error(0) +} + +/* + Mock +*/ + +func Test_Mock_TestData(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + if assert.NotNil(t, mockedService.TestData()) { + + mockedService.TestData().Set("something", 123) + assert.Equal(t, 123, mockedService.TestData().Get("something").Data()) + } +} + +func Test_Mock_On(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod") + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) +} + +func Test_Mock_Chained_On(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + mockedService. + On("TheExampleMethod", 1, 2, 3). + Return(0). + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). 
+ Return(nil) + + expectedCalls := []*Call{ + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod", + Arguments: []interface{}{1, 2, 3}, + ReturnArguments: []interface{}{0}, + }, + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod3", + Arguments: []interface{}{AnythingOfType("*mock.ExampleType")}, + ReturnArguments: []interface{}{nil}, + }, + } + assert.Equal(t, expectedCalls, mockedService.ExpectedCalls) +} + +func Test_Mock_On_WithArgs(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", 1, 2, 3, 4) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) + assert.Equal(t, Arguments{1, 2, 3, 4}, c.Arguments) +} + +func Test_Mock_On_WithFuncArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFunc", AnythingOfType("func(string) error")). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethodFunc", c.Method) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("func(string) error"), c.Arguments[0]) + + fn := func(string) error { return nil } + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFunc(fn) + }) +} + +func Test_Mock_On_WithIntArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod", + MatchedBy(func(a int) bool { + return a == 1 + }), MatchedBy(func(b int) bool { + return b == 2 + }), MatchedBy(func(c int) bool { + return c == 3 + })).Return(0, nil) + + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 4) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethod(2, 2, 3) + }) + assert.NotPanics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) +} + +func Test_Mock_On_WithPtrArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a != nil && a.ran == true }), + ).Return(nil) + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a != nil && a.ran == false }), + ).Return(errors.New("error")) + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a == nil }), + ).Return(errors.New("error2")) + + assert.Equal(t, mockedService.TheExampleMethod3(&ExampleType{true}), nil) + assert.EqualError(t, mockedService.TheExampleMethod3(&ExampleType{false}), "error") + assert.EqualError(t, mockedService.TheExampleMethod3(nil), "error2") +} + +func Test_Mock_On_WithFuncArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + fixture1, fixture2 := errors.New("fixture1"), errors.New("fixture2") + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a != nil && a("string") == fixture1 }), + ).Return(errors.New("fixture1")) + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a != nil && a("string") == fixture2 }), + ).Return(errors.New("fixture2")) + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a == nil }), + ).Return(errors.New("fixture3")) + + assert.EqualError(t, mockedService.TheExampleMethodFunc( + func(string) error { return fixture1 }), "fixture1") + assert.EqualError(t, mockedService.TheExampleMethodFunc( + func(string) error 
{ return fixture2 }), "fixture2") + assert.EqualError(t, mockedService.TheExampleMethodFunc(nil), "fixture3") +} + +func Test_Mock_On_WithInterfaceArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod4", + MatchedBy(func(a ExampleInterface) bool { return a == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod4(nil), "fixture1") +} + +func Test_Mock_On_WithChannelArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod5", + MatchedBy(func(ch chan struct{}) bool { return ch == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod5(nil), "fixture1") +} + +func Test_Mock_On_WithMapArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod6", + MatchedBy(func(m map[string]bool) bool { return m == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod6(nil), "fixture1") +} + +func Test_Mock_On_WithSliceArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod7", + MatchedBy(func(slice []bool) bool { return slice == nil }), + ).Return(errors.New("fixture1")) + + assert.EqualError(t, mockedService.TheExampleMethod7(nil), "fixture1") +} + +func Test_Mock_On_WithVariadicFunc(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodVariadic", []int{1, 2, 3}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []int{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2) + }) + +} + +func Test_Mock_On_WithMixedVariadicFunc(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodMixedVariadic", 1, []int{2, 3, 4}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 2, len(c.Arguments)) + assert.Equal(t, 1, c.Arguments[0]) + assert.Equal(t, []int{2, 3, 4}, c.Arguments[1]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 4) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 5) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithInterface(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethodVariadicInterface", []interface{}{1, 2, 3}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []interface{}{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithEmptyInterfaceArray(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + var expected []interface{} + c := mockedService. + On("TheExampleMethodVariadicInterface", expected). 
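The matcher tests above exercise mock.MatchedBy, which wraps a func(T) bool predicate; when several MatchedBy expectations target the same method, the call is routed to whichever predicate accepts the actual argument. A minimal consumer-side sketch of the same pattern (the Notifier type and its Send method are hypothetical, not part of testify):

package notifier_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// Notifier is a hypothetical mock built on testify's mock.Mock.
type Notifier struct {
	mock.Mock
}

// Send records the call and returns whatever error the expectation configured.
func (n *Notifier) Send(userID int, msg string) error {
	return n.Called(userID, msg).Error(0)
}

func TestSendMatchesOnPredicate(t *testing.T) {
	n := new(Notifier)
	// Only positive user IDs match; any other call panics as unexpected.
	n.On("Send",
		mock.MatchedBy(func(id int) bool { return id > 0 }),
		mock.AnythingOfType("string"),
	).Return(nil)

	if err := n.Send(42, "hello"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}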
+ Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, expected, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface() + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithFuncPanics(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + assert.Panics(t, func() { + mockedService.On("TheExampleMethodFunc", func(string) error { return nil }) + }) +} + +func Test_Mock_On_WithFuncTypeArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFuncType", AnythingOfType("mock.ExampleFuncType")). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("mock.ExampleFuncType"), c.Arguments[0]) + + fn := func(string) error { return nil } + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFuncType(fn) + }) +} + +func Test_Mock_Return(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_WaitUntil(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + ch := time.After(time.Second) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + WaitUntil(ch). + Return(1, "two", true) + + // assert that the call was created + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Equal(t, ch, call.WaitFor) +} + +func Test_Mock_Return_After(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). 
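WaitUntil makes the mocked call block until a value arrives on the supplied <-chan time.Time (After, used next, is the same mechanism with a fixed duration). A small sketch of using WaitUntil to control interleaving explicitly; the method name and channel wiring here are illustrative:

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
)

func TestFetchBlocksUntilReleased(t *testing.T) {
	release := make(chan time.Time)
	m := new(mock.Mock)
	m.On("Fetch", "key").WaitUntil(release).Return("value")

	go func() {
		// Unblock the pending Fetch once the test is ready.
		release <- time.Now()
	}()

	// MethodCalled blocks here until the goroutine above fires.
	args := m.MethodCalled("Fetch", "key")
	if args.String(0) != "value" {
		t.Fatalf("got %q, want %q", args.String(0), "value")
	}
}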
+ After(time.Second) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + +} + +func Test_Mock_Return_Run(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + fn := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Return(nil). + Run(fn) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) + + et := ExampleType{} + assert.Equal(t, false, et.ran) + mockedService.TheExampleMethod3(&et) + assert.Equal(t, true, et.ran) +} + +func Test_Mock_Return_Run_Out_Of_Order(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + f := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Run(f). + Return(nil) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) +} + +func Test_Mock_Return_Once(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Once() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 1, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Twice(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). 
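Run registers a side-effect function that receives the call's Arguments, and as the out-of-order test above shows, Run and Return can be chained in either order. The usual consumer use is populating an out-parameter, roughly like this (Store and Config are hypothetical; note the AnythingOfType string must match reflect's name for the type):

package store_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type Config struct{ Timeout int }

// Store is a hypothetical mock whose Load fills the caller's Config.
type Store struct{ mock.Mock }

func (s *Store) Load(dst *Config) error {
	return s.Called(dst).Error(0)
}

func TestLoadFillsConfig(t *testing.T) {
	s := new(Store)
	s.On("Load", mock.AnythingOfType("*store_test.Config")).
		Return(nil).
		Run(func(args mock.Arguments) {
			// Mutate the pointer the caller passed in.
			args.Get(0).(*Config).Timeout = 30
		})

	var cfg Config
	if err := s.Load(&cfg); err != nil || cfg.Timeout != 30 {
		t.Fatalf("Load = %v, Timeout = %d", err, cfg.Timeout)
	}
}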
+ Twice() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 2, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Times(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Times(5) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 5, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Nothing(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 0, len(call.ReturnArguments)) +} + +func Test_Mock_findExpectedCall(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, _ := m.findExpectedCall("Two") + + assert.Equal(t, -1, f) + +} + +func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two").Once() + m.On("Two", 3).Return("three").Twice() + m.On("Two", 3).Return("three").Times(8) + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_callString(t *testing.T) { + + assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false)) + +} + +func Test_Mock_Called(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true) + + returnArguments := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 1, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + 
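As the findExpectedCall tests below suggest, expectations are scanned in order and a call with remaining Repeatability is preferred, which is what makes Once/Twice/Times sequences consume in order. In consumer terms, repeated On(...) lines form a queue; a sketch of the common retry pattern (Pinger is hypothetical):

package pinger_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/mock"
)

type Pinger struct{ mock.Mock }

func (p *Pinger) Ping() error { return p.Called().Error(0) }

func TestPingRecovers(t *testing.T) {
	p := new(Pinger)
	p.On("Ping").Return(errors.New("down")).Once() // consumed by the first call
	p.On("Ping").Return(nil)                       // all later calls succeed

	if err := p.Ping(); err == nil {
		t.Fatal("first Ping should fail")
	}
	if err := p.Ping(); err != nil {
		t.Fatalf("second Ping should succeed, got %v", err)
	}
}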
assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func asyncCall(m *Mock, ch chan Arguments) { + ch <- m.Called(1, 2, 3) +} + +func Test_Mock_Called_blocks(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.Mock.On("asyncCall", 1, 2, 3).Return(5, "6", true).After(2 * time.Millisecond) + + ch := make(chan Arguments) + + go asyncCall(&mockedService.Mock, ch) + + select { + case <-ch: + t.Fatal("should have waited") + case <-time.After(1 * time.Millisecond): + } + + returnArguments := <-ch + + if assert.Equal(t, 1, len(mockedService.Mock.Calls)) { + assert.Equal(t, "asyncCall", mockedService.Mock.Calls[0].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). + Return(5, "6", true). + Once() + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). + Return(-1, "hi", false) + + returnArguments1 := mockedService.Called(1, 2, 3) + returnArguments2 := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 2, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[1].Method) + assert.Equal(t, 1, mockedService.Calls[1].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[1].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[1].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments1)) { + assert.Equal(t, 5, returnArguments1[0]) + assert.Equal(t, "6", returnArguments1[1]) + assert.Equal(t, true, returnArguments1[2]) + } + + if assert.Equal(t, 3, len(returnArguments2)) { + assert.Equal(t, -1, returnArguments2[0]) + assert.Equal(t, "hi", returnArguments2[1]) + assert.Equal(t, false, returnArguments2[2]) + } + +} + +func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4) + + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) + +} + +func Test_Mock_Called_Unexpected(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + // make sure it panics if no expectation was made + assert.Panics(t, func() { + mockedService.Called(1, 2, 3) + }, "Calling unexpected method should panic") + +} + +func Test_AssertExpectationsForObjects_Helper(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = 
new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper", 3).Return() + + mockedService1.Called(1) + mockedService2.Called(2) + mockedService3.Called(3) + + assert.True(t, AssertExpectationsForObjects(t, &mockedService1.Mock, &mockedService2.Mock, &mockedService3.Mock)) + assert.True(t, AssertExpectationsForObjects(t, mockedService1, mockedService2, mockedService3)) + +} + +func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return() + + mockedService1.Called(1) + mockedService3.Called(3) + + tt := new(testing.T) + assert.False(t, AssertExpectationsForObjects(tt, &mockedService1.Mock, &mockedService2.Mock, &mockedService3.Mock)) + assert.False(t, AssertExpectationsForObjects(tt, mockedService1, mockedService2, mockedService3)) + +} + +func Test_Mock_AssertExpectations(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder_NoArgs(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called() + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 3, 2, 1).Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.False(t, mockedService.AssertExpectations(tt)) + + // make call to the second expectation + mockedService.Called(3, 2, 1) + + // now assert expectations again + assert.True(t, mockedService.AssertExpectations(tt)) +} + +func Test_Mock_AssertExpectations_With_Pointers(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{1}).Return(1) + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{2}).Return(2) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + s := struct{ Foo int }{1} + // make the calls now + mockedService.Called(&s) + s.Foo = 2 + 
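AssertExpectations keeps reporting failure until every non-optional expectation has been satisfied, and AssertExpectationsForObjects checks several mocks at once. In a consumer test the usual shape is to defer the check; a sketch (UserStore is hypothetical):

package users_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type UserStore struct{ mock.Mock }

func (u *UserStore) Delete(id int) error { return u.Called(id).Error(0) }

func TestDeleteUser(t *testing.T) {
	store := new(UserStore)
	// The deferred assertion fails the test if Delete(7) is never called.
	defer store.AssertExpectations(t)

	store.On("Delete", 7).Return(nil).Once()
	if err := store.Delete(7); err != nil {
		t.Fatalf("Delete: %v", err)
	}
}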
mockedService.Called(&s) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectationsCustomType(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.TheExampleMethod3(&ExampleType{}) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + assert.False(t, mockedService.AssertExpectations(tt)) + + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7) + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7) + + args1 := mockedService.Called(1, 2, 3) + assert.Equal(t, 5, args1.Int(0)) + assert.Equal(t, 6, args1.Int(1)) + assert.Equal(t, 7, args1.Int(2)) + + args2 := mockedService.Called(4, 5, 6) + assert.Equal(t, 5, args2.Int(0)) + assert.Equal(t, 6, args2.Int(1)) + assert.Equal(t, 7, args2.Int(2)) + +} + +func Test_Mock_AssertNumberOfCalls(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1)) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2)) + +} + +func Test_Mock_AssertCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3)) + +} + +func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything). 
+ Return() + + mockedService.Called(1, "two", []uint8("three")) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8"))) + +} + +func Test_Mock_AssertCalled_WithArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4)) + +} + +func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once() + + mockedService.Called(1, 2, 3) + mockedService.Called(2, 3, 4) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3)) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5)) + +} + +func Test_Mock_AssertNotCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled")) + +} + +func Test_Mock_AssertOptional(t *testing.T) { + // Optional called + var ms1 = new(TestExampleImplementation) + ms1.On("TheExampleMethod", 1, 2, 3).Maybe().Return(4, nil) + ms1.TheExampleMethod(1, 2, 3) + + tt1 := new(testing.T) + assert.Equal(t, true, ms1.AssertExpectations(tt1)) + + // Optional not called + var ms2 = new(TestExampleImplementation) + ms2.On("TheExampleMethod", 1, 2, 3).Maybe().Return(4, nil) + + tt2 := new(testing.T) + assert.Equal(t, true, ms2.AssertExpectations(tt2)) + + // Non-optional called + var ms3 = new(TestExampleImplementation) + ms3.On("TheExampleMethod", 1, 2, 3).Return(4, nil) + ms3.TheExampleMethod(1, 2, 3) + + tt3 := new(testing.T) + assert.Equal(t, true, ms3.AssertExpectations(tt3)) +} + +/* + Arguments helper methods +*/ +func Test_Arguments_Get(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.Equal(t, "string", args.Get(0).(string)) + assert.Equal(t, 123, args.Get(1).(int)) + assert.Equal(t, true, args.Get(2).(bool)) + +} + +func Test_Arguments_Is(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Is("string", 123, true)) + assert.False(t, args.Is("wrong", 456, false)) + +} + +func Test_Arguments_Diff(t *testing.T) { + + var args = Arguments([]interface{}{"Hello World", 123, true}) + var diff string + var count int + diff, count = args.Diff([]interface{}{"Hello World", 456, "false"}) + + assert.Equal(t, 2, count) + assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`) + assert.Contains(t, diff, `false != %!s(bool=true)`) + +} + +func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var diff string + var count int + 
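The Arguments helpers exercised below (Get, String, Int, Bool, Error) are the typed accessors a mock implementation uses to unpack what Called returns, while Diff underlies the mismatch messages. A short sketch of the accessor side (Resolver is hypothetical):

package resolver_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type Resolver struct{ mock.Mock }

// Resolve shows each typed accessor on the returned Arguments.
func (r *Resolver) Resolve(host string) (string, int, error) {
	args := r.Called(host)
	return args.String(0), args.Int(1), args.Error(2)
}

func TestResolve(t *testing.T) {
	r := new(Resolver)
	r.On("Resolve", "db").Return("10.0.0.5", 5432, nil)

	addr, port, err := r.Resolve("db")
	if err != nil || addr != "10.0.0.5" || port != 5432 {
		t.Fatalf("got %s:%d, %v", addr, port, err)
	}
}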
diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"}) + + assert.Equal(t, 3, count) + assert.Contains(t, diff, `extra != (Missing)`) + +} + +func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var count int + _, count = args.Diff([]interface{}{"string", Anything, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) { + + var args = Arguments([]interface{}{"string", Anything, true}) + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("int"), true}) + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("string"), true}) + var count int + var diff string + diff, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 1, count) + assert.Contains(t, diff, `string != type int - %!s(int=123)`) + +} + +func Test_Arguments_Diff_WithArgMatcher(t *testing.T) { + matchFn := func(a int) bool { + return a == 123 + } + var args = Arguments([]interface{}{"string", MatchedBy(matchFn), true}) + + diff, count := args.Diff([]interface{}{"string", 124, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(int=124) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", false, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(bool=false) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, false}) + assert.Contains(t, diff, `%!s(int=123) matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, true}) + assert.Equal(t, 0, count) + assert.Contains(t, diff, `No differences.`) +} + +func Test_Arguments_Assert(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Assert(t, "string", 123, true)) + +} + +func Test_Arguments_String_Representation(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, `string,int,bool`, args.String()) + +} + +func Test_Arguments_String(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, "string", args.String(0)) + +} + +func Test_Arguments_Error(t *testing.T) { + + var err = errors.New("An Error") + var args = Arguments([]interface{}{"string", 123, true, err}) + assert.Equal(t, err, args.Error(3)) + +} + +func Test_Arguments_Error_Nil(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true, nil}) + assert.Equal(t, nil, args.Error(3)) + +} + +func Test_Arguments_Int(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, 123, args.Int(1)) + +} + +func Test_Arguments_Bool(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, true, args.Bool(2)) + +} + +func Test_WaitUntil_Parallel(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + ch1 := make(chan time.Time) + ch2 := make(chan time.Time) + + mockedService.Mock.On("TheExampleMethod2", true).Return().WaitUntil(ch2).Run(func(args Arguments) { + ch1 <- time.Now() + 
}) + + mockedService.Mock.On("TheExampleMethod2", false).Return().WaitUntil(ch1) + + // Lock both goroutines on the .WaitUntil method + go func() { + mockedService.TheExampleMethod2(false) + }() + go func() { + mockedService.TheExampleMethod2(true) + }() + + // Allow the first call to execute, so the second one executes afterwards + ch2 <- time.Now() +} + +func Test_MockMethodCalled(t *testing.T) { + m := new(Mock) + m.On("foo", "hello").Return("world") + + retArgs := m.MethodCalled("foo", "hello") + require.True(t, len(retArgs) == 1) + require.Equal(t, "world", retArgs[0]) + m.AssertExpectations(t) +} + +// Test to validate fix for racy concurrent call access in MethodCalled() +func Test_MockReturnAndCalledConcurrent(t *testing.T) { + iterations := 1000 + m := &Mock{} + call := m.On("ConcurrencyTestMethod") + + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + for i := 0; i < iterations; i++ { + call.Return(10) + } + wg.Done() + }() + go func() { + for i := 0; i < iterations; i++ { + ConcurrencyTestMethod(m) + } + wg.Done() + }() + wg.Wait() +} + +type timer struct{ Mock } + +func (s *timer) GetTime(i int) string { + return s.Called(i).Get(0).(string) +} + +func TestAfterTotalWaitTimeWhileExecution(t *testing.T) { + waitDuration := 1 + total, waitMs := 5, time.Millisecond*time.Duration(waitDuration) + aTimer := new(timer) + for i := 0; i < total; i++ { + aTimer.On("GetTime", i).After(waitMs).Return(fmt.Sprintf("Time%d", i)).Once() + } + time.Sleep(waitMs) + start := time.Now() + var results []string + + for i := 0; i < total; i++ { + results = append(results, aTimer.GetTime(i)) + } + + end := time.Now() + elapsedTime := end.Sub(start) + assert.True(t, elapsedTime > waitMs, fmt.Sprintf("Total elapsed time:%v should be greater than %v", elapsedTime, waitMs)) + assert.Equal(t, total, len(results)) + for i := range results { + assert.Equal(t, fmt.Sprintf("Time%d", i), results[i], "Return value of method should be the same") + } +} + +func ConcurrencyTestMethod(m *Mock) { + m.Called() +} diff --git a/vendor/github.com/stretchr/testify/package_test.go b/vendor/github.com/stretchr/testify/package_test.go new file mode 100644 index 000000000..7ac5d6d8e --- /dev/null +++ b/vendor/github.com/stretchr/testify/package_test.go @@ -0,0 +1,12 @@ +package testify + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestImports(t *testing.T) { + if assert.Equal(t, 1, 1) != true { + t.Error("Something is wrong.") + } +} diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go new file mode 100644 index 000000000..169de3922 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -0,0 +1,28 @@ +// Package require implements the same assertions as the `assert` package but +// stops test execution when a test fails. +// +// Example Usage +// +// The following is a complete example using require in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/require" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// require.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// Assertions +// +// The `require` package has the same global functions as the `assert` package, +// but instead of returning a boolean result they call `t.FailNow()`.
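In practice the difference from `assert` is that a failed `require` call aborts the test immediately, so later lines can safely rely on what was just checked. A minimal sketch (Parse and its Config return type are hypothetical stand-ins for code under test):

package config_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type Config struct{ Port int }

// Parse is a hypothetical stand-in for the function under test.
func Parse(path string) (*Config, error) { return &Config{Port: 8080}, nil }

func TestParse(t *testing.T) {
	cfg, err := Parse("testdata/app.yaml")
	require.NoError(t, err)         // stops the test here on failure...
	require.NotNil(t, cfg)          // ...so the line below cannot nil-deref
	assert.Equal(t, 8080, cfg.Port) // assert would continue on failure
}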
+// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package require diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go new file mode 100644 index 000000000..ac71d4058 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go @@ -0,0 +1,16 @@ +package require + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements_test.go b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go new file mode 100644 index 000000000..b120ae3b8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go @@ -0,0 +1,385 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + require := New(t) + + require.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsTypeWrapper(t *testing.T) { + require := New(t) + require.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualWrapper(t *testing.T) { + require := New(t) + require.Equal(1, 1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Equal(1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEqualWrapper(t *testing.T) { + require := New(t) + require.NotEqual(1, 2) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEqual(2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactlyWrapper(t *testing.T) { + require := New(t) + + a := float32(1) + b := float32(1) + c := float64(1) + + require.Exactly(a, b) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Exactly(a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNilWrapper(t *testing.T) { + require := New(t) + require.NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotNil(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNilWrapper(t *testing.T) { + require := New(t) + require.Nil(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Nil(new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrueWrapper(t *testing.T) { + require := New(t) + require.True(true) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.True(false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalseWrapper(t *testing.T) { + require := New(t) + 
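The wrapper tests in this file rely on a MockT that records FailNow instead of aborting, the standard trick for testing a TestingT-based API against itself. A sketch of such a recorder (recordingT is a hypothetical name; its methods match the require.TestingT interface):

package require_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// recordingT satisfies require.TestingT but only records failures
// instead of aborting, so assertions can be tested against it.
type recordingT struct{ failed bool }

func (r *recordingT) Errorf(format string, args ...interface{}) { r.failed = true }
func (r *recordingT) FailNow()                                  { r.failed = true }

func TestRecorderSeesFailure(t *testing.T) {
	rt := new(recordingT)
	require.Equal(rt, 1, 2) // would abort a real test; here it just records
	if !rt.failed {
		t.Error("expected the failed assertion to be recorded")
	}
}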
require.False(false) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.False(true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContainsWrapper(t *testing.T) { + require := New(t) + require.Contains("Hello World", "Hello") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Contains("Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContainsWrapper(t *testing.T) { + require := New(t) + require.NotContains("Hello World", "Hello!") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotContains("Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanicsWrapper(t *testing.T) { + require := New(t) + require.Panics(func() { + panic("Panic!") + }) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Panics(func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanicsWrapper(t *testing.T) { + require := New(t) + require.NotPanics(func() {}) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotPanics(func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoErrorWrapper(t *testing.T) { + require := New(t) + require.NoError(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NoError(errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestErrorWrapper(t *testing.T) { + require := New(t) + require.Error(errors.New("some error")) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Error(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualErrorWrapper(t *testing.T) { + require := New(t) + require.EqualError(errors.New("some error"), "some error") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.EqualError(errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmptyWrapper(t *testing.T) { + require := New(t) + require.Empty("") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Empty("x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmptyWrapper(t *testing.T) { + require := New(t) + require.NotEmpty("x") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEmpty("") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDurationWrapper(t *testing.T) { + require := New(t) + a := time.Now() + b := a.Add(10 * time.Second) + + require.WithinDuration(a, b, 15*time.Second) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.WithinDuration(a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDeltaWrapper(t *testing.T) { + require := New(t) + require.InDelta(1.001, 1, 0.01) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.InDelta(1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZeroWrapper(t *testing.T) { + require := New(t) + require.Zero(0) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Zero(1) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZeroWrapper(t *testing.T) { + require := New(t) + require.NotZero(1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotZero(0) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_EqualSONString(t *testing.T) { + mockT := new(MockT) + mockRequire := 
New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_Array(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go new file mode 100644 index 000000000..ac3c30878 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -0,0 +1,867 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package require + +import ( + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. 
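Every function in this generated file follows the same two-line shape: delegate to the corresponding `assert` function, then call t.FailNow() if it reported failure. Writing a custom assertion in the same style is straightforward (IsHTTPS is a hypothetical example, not part of testify):

package checks

import (
	"strings"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// IsHTTPS fails the test immediately unless the URL uses the https scheme,
// mirroring the delegate-then-FailNow pattern of the generated require funcs.
func IsHTTPS(t require.TestingT, rawurl string, msgAndArgs ...interface{}) {
	if !assert.True(t, strings.HasPrefix(rawurl, "https://"), msgAndArgs...) {
		t.FailNow()
	}
}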
+func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { + if !assert.Condition(t, comp, msgAndArgs...) { + t.FailNow() + } +} + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) { + if !assert.Conditionf(t, comp, msg, args...) { + t.FailNow() + } +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") +func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.Contains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if !assert.Containsf(t, s, contains, msg, args...) { + t.FailNow() + } +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { + if !assert.DirExists(t, path, msgAndArgs...) { + t.FailNow() + } +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { + if !assert.DirExistsf(t, path, msg, args...) { + t.FailNow() + } +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if !assert.ElementsMatch(t, listA, listB, msgAndArgs...) { + t.FailNow() + } +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if !assert.ElementsMatchf(t, listA, listB, msg, args...) { + t.FailNow() + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Empty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.Emptyf(t, object, msg, args...) { + t.FailNow() + } +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Equal(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if !assert.EqualError(t, theError, errString, msgAndArgs...) { + t.FailNow() + } +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { + if !assert.EqualErrorf(t, theError, errString, msg, args...) { + t.FailNow() + } +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123)) +func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.EqualValues(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.EqualValuesf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.Equalf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.Error(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if !assert.Errorf(t, err, msg, args...) { + t.FailNow() + } +} + +// Exactly asserts that two objects are equal in value and type. 
+// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Exactly(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.Exactlyf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.Fail(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.FailNow(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) { + if !assert.FailNowf(t, failureMessage, msg, args...) { + t.FailNow() + } +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { + if !assert.Failf(t, failureMessage, msg, args...) { + t.FailNow() + } +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool) +func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.False(t, value, msgAndArgs...) { + t.FailNow() + } +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) { + if !assert.Falsef(t, value, msg, args...) { + t.FailNow() + } +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { + if !assert.FileExists(t, path, msgAndArgs...) { + t.FailNow() + } +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + if !assert.FileExistsf(t, path, msg, args...) { + t.FailNow() + } +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if !assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if !assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) 
{ + t.FailNow() + } +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if !assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if !assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { + t.FailNow() + } +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if !assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if !assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { + t.FailNow() + } +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if !assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if !assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { + t.FailNow() + } +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). 
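These HTTP helpers invoke the handler directly against a synthetic recorded request, so no server or listener is needed. A minimal sketch against a trivial handler (the /healthz path is illustrative):

package health_test

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestHealthz(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, "ok")
	}
	// Fails the test unless the handler answers 2xx for GET /healthz.
	require.HTTPSuccess(t, handler, "GET", "/healthz", nil)
	require.HTTPBodyContains(t, handler, "GET", "/healthz", nil, "ok")
}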
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if !assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if !assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { + t.FailNow() + } +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { + t.FailNow() + } +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if !assert.Implementsf(t, interfaceObject, object, msg, args...) { + t.FailNow() + } +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if !assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if !assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if !assert.InDeltaf(t, expected, actual, delta, msg, args...) 
{ + t.FailNow() + } +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if !assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { + t.FailNow() + } +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if !assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { + t.FailNow() + } +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.IsType(t, expectedType, object, msgAndArgs...) { + t.FailNow() + } +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if !assert.IsTypef(t, expectedType, object, msg, args...) { + t.FailNow() + } +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if !assert.JSONEq(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if !assert.JSONEqf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if !assert.Len(t, object, length, msgAndArgs...) { + t.FailNow() + } +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { + if !assert.Lenf(t, object, length, msg, args...) { + t.FailNow() + } +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err) +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Nil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Nilf asserts that the specified object is nil. 
+// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.Nilf(t, object, msg, args...) { + t.FailNow() + } +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.NoError(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { + if !assert.NoErrorf(t, err, msg, args...) { + t.FailNow() + } +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") +func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.NotContains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if !assert.NotContainsf(t, s, contains, msg, args...) { + t.FailNow() + } +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotEmpty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.NotEmptyf(t, object, msg, args...) { + t.FailNow() + } +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.NotEqual(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). 
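Because these functions halt the test via `t.FailNow()`, the usual assert guard pattern (`if assert.NoError(t, err) { ... }`) collapses into straight-line code. A sketch, assuming a hypothetical fixture path:

```go
package navigator_test

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLoadFixture(t *testing.T) {
	// If ReadFile fails, require.NoError stops the test here, so the
	// NotEmpty check below never observes a bogus value.
	data, err := ioutil.ReadFile("testdata/pilot.yaml") // hypothetical fixture
	require.NoError(t, err)
	require.NotEmpty(t, data)
}
```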
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.NotEqualf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err) +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotNil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.NotNilf(t, object, msg, args...) { + t.FailNow() + } +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.NotPanics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.NotPanicsf(t, f, msg, args...) { + t.FailNow() + } +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.NotRegexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if !assert.NotRegexpf(t, rx, str, msg, args...) { + t.FailNow() + } +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if !assert.NotSubset(t, list, subset, msgAndArgs...) { + t.FailNow() + } +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if !assert.NotSubsetf(t, list, subset, msg, args...) { + t.FailNow() + } +} + +// NotZero asserts that i is not the zero value for its type. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.NotZero(t, i, msgAndArgs...) { + t.FailNow() + } +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { + if !assert.NotZerof(t, i, msg, args...) { + t.FailNow() + } +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. 
+// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.Panics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.PanicsWithValue(t, expected, f, msgAndArgs...) { + t.FailNow() + } +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.PanicsWithValuef(t, expected, f, msg, args...) { + t.FailNow() + } +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.Panicsf(t, f, msg, args...) { + t.FailNow() + } +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.Regexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if !assert.Regexpf(t, rx, str, msg, args...) { + t.FailNow() + } +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if !assert.Subset(t, list, subset, msgAndArgs...) { + t.FailNow() + } +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if !assert.Subsetf(t, list, subset, msg, args...) { + t.FailNow() + } +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool) +func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.True(t, value, msgAndArgs...) { + t.FailNow() + } +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) { + if !assert.Truef(t, value, msg, args...) { + t.FailNow() + } +} + +// WithinDuration asserts that the two times are within duration delta of each other. 
+// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if !assert.WithinDurationf(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// Zero asserts that i is the zero value for its type. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.Zero(t, i, msgAndArgs...) { + t.FailNow() + } +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) { + if !assert.Zerof(t, i, msg, args...) { + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl new file mode 100644 index 000000000..d2c38f6f2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -0,0 +1,6 @@ +{{.Comment}} +func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { + if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go new file mode 100644 index 000000000..299ceb95a --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -0,0 +1,687 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package require + +import ( + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { + Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) { + Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. 
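require_forward.go is generated from the require_forward.go.tmpl shown further below: every method on `*Assertions` simply forwards to the package-level function with the stored `t`. The constructor lives in the non-generated part of the package (`require.New` in testify v1.2.1, not shown in this hunk), so usage might look like this sketch:

```go
package navigator_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestForwarder(t *testing.T) {
	// require.New stores t once; every generated method then forwards to
	// the package-level function, e.g. r.Equal(x, y) -> require.Equal(t, x, y).
	r := require.New(t)
	r.Equal(4, 2+2)
	r.Contains("Hello World", "World")
}
```

The forwarder exists purely so tests can drop the repeated `t` argument; behaviour is otherwise identical to the package-level functions.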
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { + DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { + DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { + Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) { + EqualErrorf(a.t, theError, errString, msg, args...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123)) +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + EqualValues(a.t, expected, actual, msgAndArgs...) 
+} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + Equalf(a.t, expected, actual, msg, args...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + Error(a.t, err, msgAndArgs...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + Errorf(a.t, err, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + Exactlyf(a.t, expected, actual, msg, args...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { + Fail(a.t, failureMessage, msgAndArgs...) +} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { + FailNow(a.t, failureMessage, msgAndArgs...) +} + +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) { + FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) { + Failf(a.t, failureMessage, msg, args...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool) +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + False(a.t, value, msgAndArgs...) +} + +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { + Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { + FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. 
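`FileExists` and `DirExists` are convenient for tests that write scratch files; a sketch using the standard temp-dir helpers (the file prefix is a hypothetical name):

```go
package navigator_test

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestScratchFiles(t *testing.T) {
	r := require.New(t)
	r.DirExists(os.TempDir())

	f, err := ioutil.TempFile("", "navigator-test") // hypothetical prefix
	r.NoError(err)
	defer os.Remove(f.Name())

	// Fails (and stops the test) if the path were a directory or unreadable.
	r.FileExists(f.Name())
}
```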
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { + FileExistsf(a.t, path, msg, args...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + HTTPError(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + HTTPErrorf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). 
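Further down this file, the `InDelta` and `InEpsilon` forwarders compare numbers with an absolute or relative tolerance rather than `==`, which matters for float arithmetic; a small sketch:

```go
package navigator_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestFloatTolerance(t *testing.T) {
	r := require.New(t)
	r.InDelta(0.3, 0.1+0.2, 1e-9)   // absolute: |expected - actual| <= delta
	r.InEpsilon(100.0, 102.0, 0.03) // relative: error under 3% of expected
}
```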
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + HTTPRedirectf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + HTTPSuccessf(a.t, handler, method, url, values, msg, args...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + Implementsf(a.t, interfaceObject, object, msg, args...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) 
+} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaf(a.t, expected, actual, delta, msg, args...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + IsType(a.t, expectedType, object, msgAndArgs...) +} + +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { + IsTypef(a.t, expectedType, object, msg, args...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) { + JSONEqf(a.t, expected, actual, msg, args...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3) +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + Len(a.t, object, length, msgAndArgs...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) { + Lenf(a.t, object, length, msg, args...) +} + +// Nil asserts that the specified object is nil. 
+// +// a.Nil(err) +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + Nil(a.t, object, msgAndArgs...) +} + +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { + Nilf(a.t, object, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { + NoErrorf(a.t, err, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) { + NotEmptyf(a.t, object, msg, args...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + NotEqualf(a.t, expected, actual, msg, args...) +} + +// NotNil asserts that the specified object is not nil. 
+// +// a.NotNil(err) +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + NotNil(a.t, object, msgAndArgs...) +} + +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) { + NotNilf(a.t, object, msg, args...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ RemainCalm() }) +func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + NotPanics(a.t, f, msgAndArgs...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + NotPanicsf(a.t, f, msg, args...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { + NotZero(a.t, i, msgAndArgs...) +} + +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { + NotZerof(a.t, i, msg, args...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ GoCrazy() }) +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + Panics(a.t, f, msgAndArgs...) +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + PanicsWithValue(a.t, expected, f, msgAndArgs...) 
+} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + Panicsf(a.t, f, msg, args...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + Regexp(a.t, rx, str, msgAndArgs...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { + Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. 
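The only contract a caller must satisfy is the two-method `TestingT` interface defined in requirements.go below; requirements_test.go exercises it with a `MockT` that records failures instead of aborting. The same trick works outside the vendor tree, e.g. with this hypothetical type:

```go
package navigator_test

import "github.com/stretchr/testify/require"

// recordingT is a hypothetical TestingT that captures failures so that
// assertion behaviour can itself be tested, mirroring MockT below.
type recordingT struct{ failed bool }

func (r *recordingT) Errorf(format string, args ...interface{}) {}
func (r *recordingT) FailNow()                                  { r.failed = true }

func demoRecordingT() bool {
	rt := &recordingT{}
	require.Equal(rt, 1, 2) // records the failure; nothing is aborted
	return rt.failed        // true
}
```

Note that a real `*testing.T` stops the goroutine in `FailNow`; a recorder like this one deliberately does not, which is exactly how the vendored tests verify that failures are reported.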
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) { + Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl new file mode 100644 index 000000000..b93569e0a --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { + {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go new file mode 100644 index 000000000..e404f016d --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -0,0 +1,9 @@ +package require + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) + FailNow() +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/require/requirements_test.go b/vendor/github.com/stretchr/testify/require/requirements_test.go new file mode 100644 index 000000000..d2ccc99c9 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/requirements_test.go @@ -0,0 +1,369 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +type MockT struct { + Failed bool +} + +func (t *MockT) FailNow() { + t.Failed = true +} + +func (t *MockT) Errorf(format string, args ...interface{}) { + _, _ = format, args +} + +func TestImplements(t *testing.T) { + + Implements(t, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsType(t *testing.T) { + + IsType(t, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqual(t *testing.T) { + + Equal(t, 1, 1) + + mockT := new(MockT) + Equal(mockT, 1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } + +} + +func TestNotEqual(t *testing.T) { + + NotEqual(t, 1, 2) + mockT := new(MockT) + NotEqual(mockT, 2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactly(t *testing.T) { + + a := float32(1) + b := float32(1) + c := float64(1) + + Exactly(t, a, b) + + mockT := new(MockT) + Exactly(mockT, a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNil(t *testing.T) { + + NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + NotNil(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNil(t *testing.T) { + 
+ Nil(t, nil) + + mockT := new(MockT) + Nil(mockT, new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrue(t *testing.T) { + + True(t, true) + + mockT := new(MockT) + True(mockT, false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalse(t *testing.T) { + + False(t, false) + + mockT := new(MockT) + False(mockT, true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContains(t *testing.T) { + + Contains(t, "Hello World", "Hello") + + mockT := new(MockT) + Contains(mockT, "Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContains(t *testing.T) { + + NotContains(t, "Hello World", "Hello!") + + mockT := new(MockT) + NotContains(mockT, "Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanics(t *testing.T) { + + Panics(t, func() { + panic("Panic!") + }) + + mockT := new(MockT) + Panics(mockT, func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanics(t *testing.T) { + + NotPanics(t, func() {}) + + mockT := new(MockT) + NotPanics(mockT, func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoError(t *testing.T) { + + NoError(t, nil) + + mockT := new(MockT) + NoError(mockT, errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestError(t *testing.T) { + + Error(t, errors.New("some error")) + + mockT := new(MockT) + Error(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualError(t *testing.T) { + + EqualError(t, errors.New("some error"), "some error") + + mockT := new(MockT) + EqualError(mockT, errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmpty(t *testing.T) { + + Empty(t, "") + + mockT := new(MockT) + Empty(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmpty(t *testing.T) { + + NotEmpty(t, "x") + + mockT := new(MockT) + NotEmpty(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDuration(t *testing.T) { + + a := time.Now() + b := a.Add(10 * time.Second) + + WithinDuration(t, a, b, 15*time.Second) + + mockT := new(MockT) + WithinDuration(mockT, a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDelta(t *testing.T) { + + InDelta(t, 1.001, 1, 0.01) + + mockT := new(MockT) + InDelta(mockT, 1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZero(t *testing.T) { + + Zero(t, "") + + mockT := new(MockT) + Zero(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZero(t *testing.T) { + + NotZero(t, "x") + + mockT := new(MockT) + NotZero(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_EqualSONString(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, 
\"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}",
+		"{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}")
+	if mockT.Failed {
+		t.Error("Check should pass")
+	}
+}
+
+func TestJSONEq_Array(t *testing.T) {
+	mockT := new(MockT)
+	JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`)
+	if mockT.Failed {
+		t.Error("Check should pass")
+	}
+}
+
+func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) {
+	mockT := new(MockT)
+	JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`)
+	if !mockT.Failed {
+		t.Error("Check should fail")
+	}
+}
+
+func TestJSONEq_HashesNotEquivalent(t *testing.T) {
+	mockT := new(MockT)
+	JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+	if !mockT.Failed {
+		t.Error("Check should fail")
+	}
+}
+
+func TestJSONEq_ActualIsNotJSON(t *testing.T) {
+	mockT := new(MockT)
+	JSONEq(mockT, `{"foo": "bar"}`, "Not JSON")
+	if !mockT.Failed {
+		t.Error("Check should fail")
+	}
+}
+
+func TestJSONEq_ExpectedIsNotJSON(t *testing.T) {
+	mockT := new(MockT)
+	JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`)
+	if !mockT.Failed {
+		t.Error("Check should fail")
+	}
+}
+
+func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) {
+	mockT := new(MockT)
+	JSONEq(mockT, "Not JSON", "Not JSON")
+	if !mockT.Failed {
+		t.Error("Check should fail")
+	}
+}
+
+func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) {
+	mockT := new(MockT)
+	JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`)
+	if !mockT.Failed {
+		t.Error("Check should fail")
+	}
+}
diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go
new file mode 100644
index 000000000..f91a245d3
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/suite/doc.go
@@ -0,0 +1,65 @@
+// Package suite contains logic for creating testing suite structs
+// and running the methods on those structs as tests. The most useful
+// piece of this package is that you can create setup/teardown methods
+// on your testing suites, which will run before/after the whole suite
+// or individual tests (depending on which interface(s) you
+// implement).
+//
+// A testing suite is usually built by first extending the built-in
+// suite functionality from suite.Suite in testify. Alternatively,
+// you could reproduce that logic on your own if you wanted (you
+// just need to implement the TestingSuite interface from
+// suite/interfaces.go).
+//
+// After that, you can implement any of the interfaces in
+// suite/interfaces.go to add setup/teardown functionality to your
+// suite, and add any methods that start with "Test" to add tests.
+// Methods that do not match any suite interfaces and do not begin
+// with "Test" will not be run by testify, and can safely be used as
+// helper methods.
+//
+// Once you've built your testing suite, you need to run the suite
+// (using suite.Run from testify) inside any function that matches the
+// signature that "go test" is already looking for (i.e.
+// func(*testing.T)).
+//
+// A regular expression to select test suites can be specified via the
+// command-line argument "-run". A regular expression to select the
+// methods of test suites can be specified via the command-line
+// argument "-m". The Suite object also provides assertion methods.
+//
+// A crude example:
+//    // Basic imports
+//    import (
+//        "testing"
+//        "github.com/stretchr/testify/assert"
+//        "github.com/stretchr/testify/suite"
+//    )
+//
+//    // Define the suite, and absorb the built-in basic suite
+//    // functionality from testify - including a T() method which
+//    // returns the current testing context
+//    type ExampleTestSuite struct {
+//        suite.Suite
+//        VariableThatShouldStartAtFive int
+//    }
+//
+//    // Make sure that VariableThatShouldStartAtFive is set to five
+//    // before each test
+//    func (suite *ExampleTestSuite) SetupTest() {
+//        suite.VariableThatShouldStartAtFive = 5
+//    }
+//
+//    // All methods that begin with "Test" are run as tests within a
+//    // suite.
+//    func (suite *ExampleTestSuite) TestExample() {
+//        assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
+//        suite.Equal(5, suite.VariableThatShouldStartAtFive)
+//    }
+//
+//    // In order for 'go test' to run this suite, we need to create
+//    // a normal test function and pass our suite to suite.Run
+//    func TestExampleTestSuite(t *testing.T) {
+//        suite.Run(t, new(ExampleTestSuite))
+//    }
package suite
diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go
new file mode 100644
index 000000000..b37cb0409
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/suite/interfaces.go
@@ -0,0 +1,46 @@
+package suite
+
+import "testing"
+
+// TestingSuite can store and return the current *testing.T context
+// generated by 'go test'.
+type TestingSuite interface {
+	T() *testing.T
+	SetT(*testing.T)
+}
+
+// SetupAllSuite has a SetupSuite method, which will run before the
+// tests in the suite are run.
+type SetupAllSuite interface {
+	SetupSuite()
+}
+
+// SetupTestSuite has a SetupTest method, which will run before each
+// test in the suite.
+type SetupTestSuite interface {
+	SetupTest()
+}
+
+// TearDownAllSuite has a TearDownSuite method, which will run after
+// all the tests in the suite have been run.
+type TearDownAllSuite interface {
+	TearDownSuite()
+}
+
+// TearDownTestSuite has a TearDownTest method, which will run after
+// each test in the suite.
+type TearDownTestSuite interface {
+	TearDownTest()
+}
+
+// BeforeTest has a function to be executed right before the test
+// starts, and receives the suite and test names as input.
+type BeforeTest interface {
+	BeforeTest(suiteName, testName string)
+}
+
+// AfterTest has a function to be executed right after the test
+// finishes, and receives the suite and test names as input.
+type AfterTest interface {
+	AfterTest(suiteName, testName string)
+}
diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go
new file mode 100644
index 000000000..e20afbc21
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/suite/suite.go
@@ -0,0 +1,136 @@
+package suite
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"reflect"
+	"regexp"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var allTestsFilter = func(_, _ string) (bool, error) { return true, nil }
+var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run")
+
+// Suite is a basic testing suite with methods for storing and
+// retrieving the current *testing.T context.
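+//
+// Suite is meant to be embedded in your own suite types; a minimal
+// sketch (mirroring the suites defined in suite_test.go):
+//
+//	type MySuite struct {
+//		Suite
+//	}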
+type Suite struct {
+	*assert.Assertions
+	require *require.Assertions
+	t       *testing.T
+}
+
+// T retrieves the current *testing.T context.
+func (suite *Suite) T() *testing.T {
+	return suite.t
+}
+
+// SetT sets the current *testing.T context.
+func (suite *Suite) SetT(t *testing.T) {
+	suite.t = t
+	suite.Assertions = assert.New(t)
+	suite.require = require.New(t)
+}
+
+// Require returns a require context for suite.
+func (suite *Suite) Require() *require.Assertions {
+	if suite.require == nil {
+		suite.require = require.New(suite.T())
+	}
+	return suite.require
+}
+
+// Assert returns an assert context for suite. Normally, you can call
+// `suite.NoError(err)`, but for situations where the embedded
+// methods are overridden (for example, you might want to override
+// assert.Assertions with require.Assertions), this method is provided so you
+// can call `suite.Assert().NoError()`.
+func (suite *Suite) Assert() *assert.Assertions {
+	if suite.Assertions == nil {
+		suite.Assertions = assert.New(suite.T())
+	}
+	return suite.Assertions
+}
+
+// Run takes a testing suite and runs all of the tests attached
+// to it.
+func Run(t *testing.T, suite TestingSuite) {
+	suite.SetT(t)
+
+	if setupAllSuite, ok := suite.(SetupAllSuite); ok {
+		setupAllSuite.SetupSuite()
+	}
+	defer func() {
+		if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
+			tearDownAllSuite.TearDownSuite()
+		}
+	}()
+
+	methodFinder := reflect.TypeOf(suite)
+	tests := []testing.InternalTest{}
+	for index := 0; index < methodFinder.NumMethod(); index++ {
+		method := methodFinder.Method(index)
+		ok, err := methodFilter(method.Name)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err)
+			os.Exit(1)
+		}
+		if ok {
+			test := testing.InternalTest{
+				Name: method.Name,
+				F: func(t *testing.T) {
+					parentT := suite.T()
+					suite.SetT(t)
+					if setupTestSuite, ok := suite.(SetupTestSuite); ok {
+						setupTestSuite.SetupTest()
+					}
+					if beforeTestSuite, ok := suite.(BeforeTest); ok {
+						beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name)
+					}
+					defer func() {
+						if afterTestSuite, ok := suite.(AfterTest); ok {
+							afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name)
+						}
+						if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok {
+							tearDownTestSuite.TearDownTest()
+						}
+						suite.SetT(parentT)
+					}()
+					method.Func.Call([]reflect.Value{reflect.ValueOf(suite)})
+				},
+			}
+			tests = append(tests, test)
+		}
+	}
+	runTests(t, tests)
+}
+
+func runTests(t testing.TB, tests []testing.InternalTest) {
+	r, ok := t.(runner)
+	if !ok { // backwards compatibility with Go 1.6 and below
+		if !testing.RunTests(allTestsFilter, tests) {
+			t.Fail()
+		}
+		return
+	}
+
+	for _, test := range tests {
+		r.Run(test.Name, test.F)
+	}
+}
+
+// methodFilter selects only the test methods that begin with "Test" and
+// match the regular expression set via the command-line argument -m.
+func methodFilter(name string) (bool, error) {
+	if ok, _ := regexp.MatchString("^Test", name); !ok {
+		return false, nil
+	}
+	return regexp.MatchString(*matchMethod, name)
+}
+
+type runner interface {
+	Run(name string, f func(t *testing.T)) bool
+}
diff --git a/vendor/github.com/stretchr/testify/suite/suite_test.go b/vendor/github.com/stretchr/testify/suite/suite_test.go
new file mode 100644
index 000000000..b75fa4ac1
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/suite/suite_test.go
@@ -0,0 +1,294 @@
+package suite
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// SuiteRequireTwice is intended to test the usage of suite.Require in two
+// different tests
+type SuiteRequireTwice struct{ Suite }
+
+// TestSuiteRequireTwice checks for regressions of issue #149 where
+// suite.requirements was not initialised in suite.SetT().
+// A regression would result in these tests panicking rather than failing.
+func TestSuiteRequireTwice(t *testing.T) {
+	ok := testing.RunTests(
+		allTestsFilter,
+		[]testing.InternalTest{{
+			Name: "TestSuiteRequireTwice",
+			F: func(t *testing.T) {
+				suite := new(SuiteRequireTwice)
+				Run(t, suite)
+			},
+		}},
+	)
+	assert.Equal(t, false, ok)
+}
+
+func (s *SuiteRequireTwice) TestRequireOne() {
+	r := s.Require()
+	r.Equal(1, 2)
+}
+
+func (s *SuiteRequireTwice) TestRequireTwo() {
+	r := s.Require()
+	r.Equal(1, 2)
+}
+
+// This suite is intended to store values to make sure that only
+// testing-suite-related methods are run. It's also a fully
+// functional example of a testing suite, using setup/teardown methods
+// and a helper method that is ignored by testify. To make this look
+// more like a real world example, all tests in the suite perform some
+// type of assertion.
+type SuiteTester struct {
+	// Include our basic suite logic.
+	Suite
+
+	// Keep counts of how many times each method is run.
+	SetupSuiteRunCount    int
+	TearDownSuiteRunCount int
+	SetupTestRunCount     int
+	TearDownTestRunCount  int
+	TestOneRunCount       int
+	TestTwoRunCount       int
+	NonTestMethodRunCount int
+
+	SuiteNameBefore []string
+	TestNameBefore  []string
+
+	SuiteNameAfter []string
+	TestNameAfter  []string
+
+	TimeBefore []time.Time
+	TimeAfter  []time.Time
+}
+
+type SuiteSkipTester struct {
+	// Include our basic suite logic.
+	Suite
+
+	// Keep counts of how many times each method is run.
+	SetupSuiteRunCount    int
+	TearDownSuiteRunCount int
+}
+
+// The SetupSuite method will be run by testify once, at the very
+// start of the testing suite, before any tests are run.
+func (suite *SuiteTester) SetupSuite() {
+	suite.SetupSuiteRunCount++
+}
+
+func (suite *SuiteTester) BeforeTest(suiteName, testName string) {
+	suite.SuiteNameBefore = append(suite.SuiteNameBefore, suiteName)
+	suite.TestNameBefore = append(suite.TestNameBefore, testName)
+	suite.TimeBefore = append(suite.TimeBefore, time.Now())
+}
+
+func (suite *SuiteTester) AfterTest(suiteName, testName string) {
+	suite.SuiteNameAfter = append(suite.SuiteNameAfter, suiteName)
+	suite.TestNameAfter = append(suite.TestNameAfter, testName)
+	suite.TimeAfter = append(suite.TimeAfter, time.Now())
+}
+
+func (suite *SuiteSkipTester) SetupSuite() {
+	suite.SetupSuiteRunCount++
+	suite.T().Skip()
+}
+
+// The TearDownSuite method will be run by testify once, at the very
+// end of the testing suite, after all tests have been run.
+func (suite *SuiteTester) TearDownSuite() {
+	suite.TearDownSuiteRunCount++
+}
+
+func (suite *SuiteSkipTester) TearDownSuite() {
+	suite.TearDownSuiteRunCount++
+}
+
+// The SetupTest method will be run before every test in the suite.
+func (suite *SuiteTester) SetupTest() {
+	suite.SetupTestRunCount++
+}
+
+// The TearDownTest method will be run after every test in the suite.
+func (suite *SuiteTester) TearDownTest() {
+	suite.TearDownTestRunCount++
+}
+
+// Every method in a testing suite that begins with "Test" will be run
+// as a test. TestOne is an example of a test. For the purposes of
+// this example, we've included assertions in the tests, since most
+// tests will issue assertions.
+func (suite *SuiteTester) TestOne() { + beforeCount := suite.TestOneRunCount + suite.TestOneRunCount++ + assert.Equal(suite.T(), suite.TestOneRunCount, beforeCount+1) + suite.Equal(suite.TestOneRunCount, beforeCount+1) +} + +// TestTwo is another example of a test. +func (suite *SuiteTester) TestTwo() { + beforeCount := suite.TestTwoRunCount + suite.TestTwoRunCount++ + assert.NotEqual(suite.T(), suite.TestTwoRunCount, beforeCount) + suite.NotEqual(suite.TestTwoRunCount, beforeCount) +} + +func (suite *SuiteTester) TestSkip() { + suite.T().Skip() +} + +// NonTestMethod does not begin with "Test", so it will not be run by +// testify as a test in the suite. This is useful for creating helper +// methods for your tests. +func (suite *SuiteTester) NonTestMethod() { + suite.NonTestMethodRunCount++ +} + +// TestRunSuite will be run by the 'go test' command, so within it, we +// can run our suite using the Run(*testing.T, TestingSuite) function. +func TestRunSuite(t *testing.T) { + suiteTester := new(SuiteTester) + Run(t, suiteTester) + + // Normally, the test would end here. The following are simply + // some assertions to ensure that the Run function is working as + // intended - they are not part of the example. + + // The suite was only run once, so the SetupSuite and TearDownSuite + // methods should have each been run only once. + assert.Equal(t, suiteTester.SetupSuiteRunCount, 1) + assert.Equal(t, suiteTester.TearDownSuiteRunCount, 1) + + assert.Equal(t, len(suiteTester.SuiteNameAfter), 3) + assert.Equal(t, len(suiteTester.SuiteNameBefore), 3) + assert.Equal(t, len(suiteTester.TestNameAfter), 3) + assert.Equal(t, len(suiteTester.TestNameBefore), 3) + + assert.Contains(t, suiteTester.TestNameAfter, "TestOne") + assert.Contains(t, suiteTester.TestNameAfter, "TestTwo") + assert.Contains(t, suiteTester.TestNameAfter, "TestSkip") + + assert.Contains(t, suiteTester.TestNameBefore, "TestOne") + assert.Contains(t, suiteTester.TestNameBefore, "TestTwo") + assert.Contains(t, suiteTester.TestNameBefore, "TestSkip") + + for _, suiteName := range suiteTester.SuiteNameAfter { + assert.Equal(t, "SuiteTester", suiteName) + } + + for _, suiteName := range suiteTester.SuiteNameBefore { + assert.Equal(t, "SuiteTester", suiteName) + } + + for _, when := range suiteTester.TimeAfter { + assert.False(t, when.IsZero()) + } + + for _, when := range suiteTester.TimeBefore { + assert.False(t, when.IsZero()) + } + + // There are three test methods (TestOne, TestTwo, and TestSkip), so + // the SetupTest and TearDownTest methods (which should be run once for + // each test) should have been run three times. + assert.Equal(t, suiteTester.SetupTestRunCount, 3) + assert.Equal(t, suiteTester.TearDownTestRunCount, 3) + + // Each test should have been run once. + assert.Equal(t, suiteTester.TestOneRunCount, 1) + assert.Equal(t, suiteTester.TestTwoRunCount, 1) + + // Methods that don't match the test method identifier shouldn't + // have been run at all. 
+	assert.Equal(t, suiteTester.NonTestMethodRunCount, 0)
+
+	suiteSkipTester := new(SuiteSkipTester)
+	Run(t, suiteSkipTester)
+
+	// The suite was only run once, so the SetupSuite and TearDownSuite
+	// methods should have each been run only once, even though SetupSuite
+	// called Skip()
+	assert.Equal(t, suiteSkipTester.SetupSuiteRunCount, 1)
+	assert.Equal(t, suiteSkipTester.TearDownSuiteRunCount, 1)
+
+}
+
+func TestSuiteGetters(t *testing.T) {
+	suite := new(SuiteTester)
+	suite.SetT(t)
+	assert.NotNil(t, suite.Assert())
+	assert.Equal(t, suite.Assertions, suite.Assert())
+	assert.NotNil(t, suite.Require())
+	assert.Equal(t, suite.require, suite.Require())
+}
+
+type SuiteLoggingTester struct {
+	Suite
+}
+
+func (s *SuiteLoggingTester) TestLoggingPass() {
+	s.T().Log("TESTLOGPASS")
+}
+
+func (s *SuiteLoggingTester) TestLoggingFail() {
+	s.T().Log("TESTLOGFAIL")
+	assert.NotNil(s.T(), nil) // expected to fail
+}
+
+type StdoutCapture struct {
+	oldStdout *os.File
+	readPipe  *os.File
+}
+
+func (sc *StdoutCapture) StartCapture() {
+	sc.oldStdout = os.Stdout
+	sc.readPipe, os.Stdout, _ = os.Pipe()
+}
+
+func (sc *StdoutCapture) StopCapture() (string, error) {
+	if sc.oldStdout == nil || sc.readPipe == nil {
+		return "", errors.New("StartCapture not called before StopCapture")
+	}
+	os.Stdout.Close()
+	os.Stdout = sc.oldStdout
+	bytes, err := ioutil.ReadAll(sc.readPipe)
+	if err != nil {
+		return "", err
+	}
+	return string(bytes), nil
+}
+
+func TestSuiteLogging(t *testing.T) {
+	suiteLoggingTester := new(SuiteLoggingTester)
+	capture := StdoutCapture{}
+	internalTest := testing.InternalTest{
+		Name: "SomeTest",
+		F: func(subT *testing.T) {
+			Run(subT, suiteLoggingTester)
+		},
+	}
+	capture.StartCapture()
+	testing.RunTests(allTestsFilter, []testing.InternalTest{internalTest})
+	output, err := capture.StopCapture()
+	require.NoError(t, err, "Got an error trying to capture stdout and stderr!")
+	require.NotEmpty(t, output, "output content must not be empty")
+
+	// Failed tests' output is always printed
+	assert.Contains(t, output, "TESTLOGFAIL")
+
+	if testing.Verbose() {
+		// In verbose mode, output from successful tests is also printed
+		assert.Contains(t, output, "TESTLOGPASS")
+	} else {
+		assert.NotContains(t, output, "TESTLOGPASS")
+	}
+}
diff --git a/vendor/github.com/termie/go-shutil/.gitignore b/vendor/github.com/termie/go-shutil/.gitignore
new file mode 100644
index 000000000..139b1ee73
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/.gitignore
@@ -0,0 +1 @@
+test/testfile3
diff --git a/vendor/github.com/termie/go-shutil/LICENSE b/vendor/github.com/termie/go-shutil/LICENSE
new file mode 100644
index 000000000..3890b94bd
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/LICENSE
@@ -0,0 +1 @@
+I guess Python's? If that doesn't apply then MIT. Have fun.
diff --git a/vendor/github.com/termie/go-shutil/README.rst b/vendor/github.com/termie/go-shutil/README.rst
new file mode 100644
index 000000000..b63b016f7
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/README.rst
@@ -0,0 +1,24 @@
+=========================================
+High-level Filesystem Operations (for Go)
+=========================================
+
+
+A direct port of a few of the functions from Python's shutil package for
+high-level filesystem operations.
+
+This project pretty much only exists so that other people don't have to keep
+re-writing this code in their projects; at this time we have been unable to
+find any helpful packages for this in the stdlib or elsewhere.
+ +We don't expect it to be perfect, just better than whatever your first draft +would have been. Patches welcome. + +See also: https://docs.python.org/3.5/library/shutil.html + +================ +Functions So Far +================ + +We support Copy, CopyFile, CopyMode, and CopyTree. CopyStat would be nice if +anybody wants to write that. Also the other functions that might be useful in +the python library :D diff --git a/vendor/github.com/termie/go-shutil/shutil.go b/vendor/github.com/termie/go-shutil/shutil.go new file mode 100644 index 000000000..09fcd38a1 --- /dev/null +++ b/vendor/github.com/termie/go-shutil/shutil.go @@ -0,0 +1,326 @@ +package shutil + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + + +type SameFileError struct { + Src string + Dst string +} + +func (e SameFileError) Error() string { + return fmt.Sprintf("%s and %s are the same file", e.Src, e.Dst) +} + +type SpecialFileError struct { + File string + FileInfo os.FileInfo +} + +func (e SpecialFileError) Error() string { + return fmt.Sprintf("`%s` is a named pipe", e.File) +} + +type NotADirectoryError struct { + Src string +} + +func (e NotADirectoryError) Error() string { + return fmt.Sprintf("`%s` is not a directory", e.Src) +} + + +type AlreadyExistsError struct { + Dst string +} + +func (e AlreadyExistsError) Error() string { + return fmt.Sprintf("`%s` already exists", e.Dst) +} + + +func samefile(src string, dst string) bool { + srcInfo, _ := os.Stat(src) + dstInfo, _ := os.Stat(dst) + return os.SameFile(srcInfo, dstInfo) +} + +func specialfile(fi os.FileInfo) bool { + return (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +func IsSymlink(fi os.FileInfo) bool { + return (fi.Mode() & os.ModeSymlink) == os.ModeSymlink +} + + +// Copy data from src to dst +// +// If followSymlinks is not set and src is a symbolic link, a +// new symlink will be created instead of copying the file it points +// to. +func CopyFile(src, dst string, followSymlinks bool) (error) { + if samefile(src, dst) { + return &SameFileError{src, dst} + } + + // Make sure src exists and neither are special files + srcStat, err := os.Lstat(src) + if err != nil { + return err + } + if specialfile(srcStat) { + return &SpecialFileError{src, srcStat} + } + + dstStat, err := os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if specialfile(dstStat) { + return &SpecialFileError{dst, dstStat} + } + } + + // If we don't follow symlinks and it's a symlink, just link it and be done + if !followSymlinks && IsSymlink(srcStat) { + return os.Symlink(src, dst) + } + + // If we are a symlink, follow it + if IsSymlink(srcStat) { + src, err = os.Readlink(src) + if err != nil { + return err + } + srcStat, err = os.Stat(src) + if err != nil { + return err + } + } + + // Do the actual copy + fsrc, err := os.Open(src) + if err != nil { + return err + } + defer fsrc.Close() + + fdst, err := os.Create(dst) + if err != nil { + return err + } + defer fdst.Close() + + size, err := io.Copy(fdst, fsrc) + if err != nil { + return err + } + + if size != srcStat.Size() { + return fmt.Errorf("%s: %d/%d copied", src, size, srcStat.Size()) + } + + return nil +} + + +// Copy mode bits from src to dst. +// +// If followSymlinks is false, symlinks aren't followed if and only +// if both `src` and `dst` are symlinks. 
If `lchmod` isn't available
+// and both are symlinks this does nothing. (I don't think lchmod is
+// available in Go.)
+func CopyMode(src, dst string, followSymlinks bool) error {
+	srcStat, err := os.Lstat(src)
+	if err != nil {
+		return err
+	}
+
+	dstStat, err := os.Lstat(dst)
+	if err != nil {
+		return err
+	}
+
+	// They are both symlinks and we can't change mode on symlinks.
+	if !followSymlinks && IsSymlink(srcStat) && IsSymlink(dstStat) {
+		return nil
+	}
+
+	// At least one is not a symlink; get the actual file stats
+	srcStat, _ = os.Stat(src)
+	err = os.Chmod(dst, srcStat.Mode())
+	return err
+}
+
+// Copy data and mode bits ("cp src dst"). Return the file's destination.
+//
+// The destination may be a directory.
+//
+// If followSymlinks is false, symlinks won't be followed. This
+// resembles GNU's "cp -P src dst".
+//
+// If source and destination are the same file, a SameFileError will be
+// raised.
+func Copy(src, dst string, followSymlinks bool) (string, error) {
+	dstInfo, err := os.Stat(dst)
+
+	if err == nil && dstInfo.Mode().IsDir() {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+
+	if err != nil && !os.IsNotExist(err) {
+		return dst, err
+	}
+
+	err = CopyFile(src, dst, followSymlinks)
+	if err != nil {
+		return dst, err
+	}
+
+	err = CopyMode(src, dst, followSymlinks)
+	if err != nil {
+		return dst, err
+	}
+
+	return dst, nil
+}
+
+type CopyTreeOptions struct {
+	Symlinks               bool
+	IgnoreDanglingSymlinks bool
+	CopyFunction           func(string, string, bool) (string, error)
+	Ignore                 func(string, []os.FileInfo) []string
+}
+
+// Recursively copy a directory tree.
+//
+// The destination directory must not already exist.
+//
+// If the optional Symlinks flag is true, symbolic links in the
+// source tree result in symbolic links in the destination tree; if
+// it is false, the contents of the files pointed to by symbolic
+// links are copied. If the file pointed to by the symlink doesn't
+// exist, an error will be returned.
+//
+// You can set the optional IgnoreDanglingSymlinks flag to true if you
+// want to silence this error. Notice that this has no effect on
+// platforms that don't support os.Symlink.
+//
+// The optional Ignore argument is a callable. If given, it
+// is called with the `src` parameter, which is the directory
+// being visited by CopyTree(), and `names`, which is the list of
+// `src` contents, as returned by ioutil.ReadDir():
+//
+//	callable(src, entries) -> ignoredNames
+//
+// Since CopyTree() is called recursively, the callable will be
+// called once for each directory that is copied. It returns a
+// list of names relative to the `src` directory that should
+// not be copied.
+//
+// The optional CopyFunction argument is a callable that will be used
+// to copy each file. It will be called with the source path and the
+// destination path as arguments. By default, Copy() is used, but any
+// function that supports the same signature (like Copy2() when it
+// exists) can be used.
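+//
+// A minimal usage sketch (the paths here are hypothetical):
+//
+//	err := CopyTree("path/to/src", "path/to/dst", nil)
+//	if err != nil {
+//		// handle the error
+//	}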
+func CopyTree(src, dst string, options *CopyTreeOptions) error { + if options == nil { + options = &CopyTreeOptions{Symlinks:false, + Ignore:nil, + CopyFunction:Copy, + IgnoreDanglingSymlinks:false} + } + + + srcFileInfo, err := os.Stat(src) + if err != nil { + return err + } + + if !srcFileInfo.IsDir() { + return &NotADirectoryError{src} + } + + _, err = os.Open(dst) + if !os.IsNotExist(err) { + return &AlreadyExistsError{dst} + } + + entries, err := ioutil.ReadDir(src) + if err != nil { + return err + } + + err = os.MkdirAll(dst, srcFileInfo.Mode()) + if err != nil { + return err + } + + ignoredNames := []string{} + if options.Ignore != nil { + ignoredNames = options.Ignore(src, entries) + } + + for _, entry := range entries { + if stringInSlice(entry.Name(), ignoredNames) { + continue + } + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + entryFileInfo, err := os.Lstat(srcPath) + if err != nil { + return err + } + + // Deal with symlinks + if IsSymlink(entryFileInfo) { + linkTo, err := os.Readlink(srcPath) + if err != nil { + return err + } + if options.Symlinks { + os.Symlink(linkTo, dstPath) + //CopyStat(srcPath, dstPath, false) + } else { + // ignore dangling symlink if flag is on + _, err = os.Stat(linkTo) + if os.IsNotExist(err) && options.IgnoreDanglingSymlinks { + continue + } + _, err = options.CopyFunction(srcPath, dstPath, false) + if err != nil { + return err + } + } + } else if entryFileInfo.IsDir() { + err = CopyTree(srcPath, dstPath, options) + if err != nil { + return err + } + } else { + _, err = options.CopyFunction(srcPath, dstPath, false) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/termie/go-shutil/shutil_test.go b/vendor/github.com/termie/go-shutil/shutil_test.go new file mode 100644 index 000000000..f6ec26196 --- /dev/null +++ b/vendor/github.com/termie/go-shutil/shutil_test.go @@ -0,0 +1,156 @@ +package shutil + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + + +func filesMatch(src, dst string) (bool, error) { + srcContents, err := ioutil.ReadFile(src) + if err != nil { + return false, err + } + + dstContents, err := ioutil.ReadFile(dst) + if err != nil { + return false, err + } + + if bytes.Compare(srcContents, dstContents) != 0 { + return false, nil + } + return true, nil +} + + +func TestSameFileError(t *testing.T) { + _, err := Copy("test/testfile", "test/testfile", false) + _, ok := err.(*SameFileError) + if !ok { + t.Error(err) + } +} + + +func TestCopyFile(t *testing.T) { + // clear out existing files if they exist + os.Remove("test/testfile3") + + err := CopyFile("test/testfile", "test/testfile3", false) + if err != nil { + t.Error(err) + return + } + + match, err := filesMatch("test/testfile", "test/testfile3") + if err != nil { + t.Error(err) + return + } + if !match { + t.Fail() + return + } + + // And again without clearing the files + err = CopyFile("test/testfile2", "test/testfile3", false) + if err != nil { + t.Error(err) + return + } + + match2, err := filesMatch("test/testfile2", "test/testfile3") + if err != nil { + t.Error(err) + return + } + + if !match2 { + t.Fail() + return + } +} + + +func TestCopy(t *testing.T) { + // clear out existing files if they exist + os.Remove("test/testfile3") + + _, err := Copy("test/testfile", "test/testfile3", false) + if err != nil { + t.Error(err) + return + } + + match, err := filesMatch("test/testfile", "test/testfile3") + if err != nil { + t.Error(err) + return + } + if !match { + t.Fail() + return + } + + 
// And again without clearing the files + _, err = Copy("test/testfile2", "test/testfile3", false) + if err != nil { + t.Error(err) + return + } + + match2, err := filesMatch("test/testfile2", "test/testfile3") + if err != nil { + t.Error(err) + return + } + + if !match2 { + t.Fail() + return + } +} + + +func TestCopyTree(t *testing.T) { + // clear out existing files if they exist + os.RemoveAll("test/testdir3") + + err := CopyTree("test/testdir", "test/testdir3", nil) + if err != nil { + t.Error(err) + return + } + + match, err := filesMatch("test/testdir/file1", "test/testdir3/file1") + if err != nil { + t.Error(err) + return + } + if !match { + t.Fail() + return + } + + // // And again without clearing the files + // _, err = Copy("test/testfile2", "test/testfile3", false) + // if err != nil { + // t.Error(err) + // return + // } + + // match2, err := filesMatch("test/testfile2", "test/testfile3") + // if err != nil { + // t.Error(err) + // return + // } + + // if !match2 { + // t.Fail() + // return + // } +} + diff --git a/vendor/github.com/termie/go-shutil/test/testdir/file1 b/vendor/github.com/termie/go-shutil/test/testdir/file1 new file mode 100644 index 000000000..e2129701f --- /dev/null +++ b/vendor/github.com/termie/go-shutil/test/testdir/file1 @@ -0,0 +1 @@ +file1 diff --git a/vendor/github.com/termie/go-shutil/test/testdir/file2 b/vendor/github.com/termie/go-shutil/test/testdir/file2 new file mode 100644 index 000000000..6c493ff74 --- /dev/null +++ b/vendor/github.com/termie/go-shutil/test/testdir/file2 @@ -0,0 +1 @@ +file2 diff --git a/vendor/github.com/termie/go-shutil/test/testfile b/vendor/github.com/termie/go-shutil/test/testfile new file mode 100644 index 000000000..26918572e --- /dev/null +++ b/vendor/github.com/termie/go-shutil/test/testfile @@ -0,0 +1 @@ +testfile diff --git a/vendor/github.com/termie/go-shutil/test/testfile2 b/vendor/github.com/termie/go-shutil/test/testfile2 new file mode 100644 index 000000000..7d576476a --- /dev/null +++ b/vendor/github.com/termie/go-shutil/test/testfile2 @@ -0,0 +1 @@ +testfile2 diff --git a/vendor/sigs.k8s.io/testing_frameworks/.travis.yml b/vendor/sigs.k8s.io/testing_frameworks/.travis.yml new file mode 100644 index 000000000..2a13715e5 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/.travis.yml @@ -0,0 +1,23 @@ +language: go + +go: + - 1.9.x + +go_import_path: sigs.k8s.io/testing_frameworks + +before_install: + - source ./bin/consider-early-travis-exit.sh + - ./bin/install-test-dependencies.sh + +# Install must be set to prevent default `go get` to run. +# The dependencies have already been vendored by `dep` so +# we don't need to fetch them. +install: + - + +script: + - ./bin/pre-commit.sh + +# TBD. Suppressing for now. +notifications: + email: false diff --git a/vendor/sigs.k8s.io/testing_frameworks/CONTRIBUTING.md b/vendor/sigs.k8s.io/testing_frameworks/CONTRIBUTING.md new file mode 100644 index 000000000..181384bf7 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing guidelines + +## Sign the CLA + +Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests. Please see https://git.k8s.io/community/CLA.md for more info + +### Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +1. The [repo owners](OWNERS) will respond to your issue promptly. +1. 
If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. diff --git a/vendor/sigs.k8s.io/testing_frameworks/Gopkg.lock b/vendor/sigs.k8s.io/testing_frameworks/Gopkg.lock new file mode 100644 index 000000000..6c511c4a3 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/Gopkg.lock @@ -0,0 +1,108 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types" + ] + revision = "9eda700730cba42af70d53180f9dcce9266bc2bc" + version = "v1.4.0" + +[[projects]] + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "gbytes", + "gexec", + "ghttp", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types" + ] + revision = "003f63b7f4cff3fc95357005358af2de0f5fe152" + version = "v1.3.0" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "html", + "html/atom", + "html/charset" + ] + revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "2c42eef0765b9837fbdab12011af7830f55f88f0" + +[[projects]] + branch = "master" + name = "golang.org/x/text" + packages = [ + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/gen", + "internal/tag", + "internal/utf8internal", + "language", + "runes", + "transform", + "unicode/cldr" + ] + revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" + +[[projects]] + branch = "v2" + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "786b91b457c79da1987e19ec103b6b530acd7ce0f468404fc16ab7fdbded810e" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/sigs.k8s.io/testing_frameworks/Gopkg.toml b/vendor/sigs.k8s.io/testing_frameworks/Gopkg.toml new file mode 100644 index 000000000..082953ae6 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/onsi/ginkgo" + version = "1.4.0" + +[[constraint]] + name = "github.com/onsi/gomega" + version = "1.3.0" diff --git a/vendor/sigs.k8s.io/testing_frameworks/LICENSE b/vendor/sigs.k8s.io/testing_frameworks/LICENSE new file mode 100644 index 000000000..538111e1e --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 The Kubernetes Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/testing_frameworks/OWNERS b/vendor/sigs.k8s.io/testing_frameworks/OWNERS new file mode 100644 index 000000000..6b37fea82 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/devel/owners.md + +approvers: + - sig-testing-leads + - frameworks-admins diff --git a/vendor/sigs.k8s.io/testing_frameworks/OWNERS_ALIASES b/vendor/sigs.k8s.io/testing_frameworks/OWNERS_ALIASES new file mode 100644 index 000000000..fc9e466e6 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/OWNERS_ALIASES @@ -0,0 +1,12 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/devel/owners.md + +aliases: + sig-testing-leads: + - fejta + - spiffxp + - stevekuznetsov + - timothysc + frameworks-admins: + - apelisse + - hoegaarden + - totherme diff --git a/vendor/sigs.k8s.io/testing_frameworks/README.md b/vendor/sigs.k8s.io/testing_frameworks/README.md new file mode 100644 index 000000000..f5982af8d --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/README.md @@ -0,0 +1,21 @@ +# Frameworks + +Test frameworks for testing kubernetes + +This was created as a result of [kubernetes/community#1524](https://github.com/kubernetes/community/pull/1524) + +## What lives here? + +- The [integration test framework](integration/) + +## What is allowed to live here? + +Any test framework for testing any part of kubernetes is welcome so long as we +can avoid vendor loops. + +Right now, the only things vendored into this repo are +[ginkgo](https://github.com/onsi/ginkgo) and +[gomega](https://github.com/onsi/gomega). We would like to keep vendored +libraries to a minimum in order to make it as easy as possible to import these +frameworks into other kubernetes repos. Code in this repo should certainly +never import `k8s.io/kubernetes`. diff --git a/vendor/sigs.k8s.io/testing_frameworks/SECURITY_CONTACTS b/vendor/sigs.k8s.io/testing_frameworks/SECURITY_CONTACTS new file mode 100644 index 000000000..739996628 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/SECURITY_CONTACTS @@ -0,0 +1,15 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. 
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+hoegaarden
+spiffxp
+timothysc
diff --git a/vendor/sigs.k8s.io/testing_frameworks/code-of-conduct.md b/vendor/sigs.k8s.io/testing_frameworks/code-of-conduct.md
new file mode 100644
index 000000000..0d15c00cf
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/.gitignore b/vendor/sigs.k8s.io/testing_frameworks/integration/.gitignore
new file mode 100644
index 000000000..16308b38c
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/.gitignore
@@ -0,0 +1 @@
+assets/bin
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/README.md b/vendor/sigs.k8s.io/testing_frameworks/integration/README.md
new file mode 100644
index 000000000..6ad37397f
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/README.md
@@ -0,0 +1,8 @@
+# Integration Testing Framework
+
+A framework for integration testing components of kubernetes. This framework is
+intended to work properly both in CI, and on a local dev machine. It therefore
+explicitly supports both Linux and Darwin.
+
+For detailed documentation see the
+[![GoDoc](https://godoc.org/github.com/kubernetes-sigs/testing_frameworks/integration?status.svg)](https://godoc.org/github.com/kubernetes-sigs/testing_frameworks/integration).
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go b/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go
new file mode 100644
index 000000000..3bf2d81b6
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go
@@ -0,0 +1,112 @@
+package integration
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"time"
+
+	"sigs.k8s.io/testing_frameworks/integration/internal"
+)
+
+// APIServer knows how to run a kubernetes apiserver.
+type APIServer struct {
+	// URL is the address the APIServer should listen on for client connections.
+	//
+	// If this is not specified, we default to a random free port on localhost.
+	URL *url.URL
+
+	// Path is the path to the apiserver binary.
+	//
+	// If this is left as the empty string, we will attempt to locate a binary,
+	// by checking for the TEST_ASSET_KUBE_APISERVER environment variable, and
+	// the default test assets directory. See the "Binaries" section above (in
+	// doc.go) for details.
+	Path string
+
+	// Args is a list of arguments which will be passed to the APIServer binary.
+	// Before they are passed on, they will be evaluated as go-template strings.
+	// This means you can use fields which are defined and exported on this
+	// APIServer struct (e.g. "--cert-dir={{ .Dir }}").
+	// Those templates will be evaluated after the defaulting of the APIServer's
+	// fields has already happened and just before the binary actually gets
+	// started. Thus you have access to calculated fields like `URL` and others.
+	//
+	// If not specified, the minimal set of arguments to run the APIServer will
+	// be used.
+	Args []string
+
+	// CertDir is a path to a directory containing whatever certificates the
+	// APIServer will need.
+	//
+	// If left unspecified, then the Start() method will create a fresh temporary
+	// directory, and the Stop() method will clean it up.
+	CertDir string
+
+	// EtcdURL is the URL of the Etcd the APIServer should use.
+	//
+	// If this is not specified, the Start() method will return an error.
+	EtcdURL *url.URL
+
+	// StartTimeout, StopTimeout specify the time the APIServer is allowed to
+	// take when starting and stopping before an error is emitted.
+	//
+	// If not specified, these default to 20 seconds.
+	StartTimeout time.Duration
+	StopTimeout  time.Duration
+
+	// Out, Err specify where APIServer should write its StdOut, StdErr to.
+	//
+	// If not specified, the output will be discarded.
+	Out io.Writer
+	Err io.Writer
+
+	processState *internal.ProcessState
+}
+
+// Start starts the apiserver, waits for it to come up, and returns an error
+// if one occurred.
+func (s *APIServer) Start() error {
+	if s.EtcdURL == nil {
+		return fmt.Errorf("expected EtcdURL to be configured")
+	}
+
+	var err error
+
+	s.processState = &internal.ProcessState{}
+
+	s.processState.DefaultedProcessInput, err = internal.DoDefaulting(
+		"kube-apiserver",
+		s.URL,
+		s.CertDir,
+		s.Path,
+		s.StartTimeout,
+		s.StopTimeout,
+	)
+	if err != nil {
+		return err
+	}
+
+	s.processState.HealthCheckEndpoint = "/healthz"
+
+	s.URL = &s.processState.URL
+	s.CertDir = s.processState.Dir
+	s.Path = s.processState.Path
+	s.StartTimeout = s.processState.StartTimeout
+	s.StopTimeout = s.processState.StopTimeout
+
+	s.processState.Args, err = internal.RenderTemplates(
+		internal.DoAPIServerArgDefaulting(s.Args), s,
+	)
+	if err != nil {
+		return err
+	}
+
+	return s.processState.Start(s.Out, s.Err)
+}
+
+// Stop stops this process gracefully, waits for its termination, and cleans up
+// the CertDir if necessary.
+func (s *APIServer) Stop() error {
+	return s.processState.Stop()
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/control_plane.go b/vendor/sigs.k8s.io/testing_frameworks/integration/control_plane.go
new file mode 100644
index 000000000..a35083757
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/control_plane.go
@@ -0,0 +1,59 @@
+package integration
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// ControlPlane is a struct that knows how to start your test control plane.
+//
+// Right now, that means Etcd and your APIServer. This is likely to increase in
+// the future.
+type ControlPlane struct {
+	APIServer *APIServer
+	Etcd      *Etcd
+}
+
+// Start will start your control plane processes. To stop them, call Stop().
+func (f *ControlPlane) Start() error {
+	if f.Etcd == nil {
+		f.Etcd = &Etcd{}
+	}
+	if err := f.Etcd.Start(); err != nil {
+		return err
+	}
+
+	if f.APIServer == nil {
+		f.APIServer = &APIServer{}
+	}
+	f.APIServer.EtcdURL = f.Etcd.URL
+	return f.APIServer.Start()
+}
+
+// Stop will stop your control plane processes, and clean up their data.
+func (f *ControlPlane) Stop() error {
+	if f.APIServer != nil {
+		if err := f.APIServer.Stop(); err != nil {
+			return err
+		}
+	}
+	if f.Etcd != nil {
+		if err := f.Etcd.Stop(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// APIURL returns the URL you should connect to to talk to your API.
+func (f *ControlPlane) APIURL() *url.URL {
+	return f.APIServer.URL
+}
+
+// KubeCtl returns a pre-configured KubeCtl, ready to connect to this
+// ControlPlane.
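+//
+// A short usage sketch, adapted from the Quickstart in doc.go (cp is
+// assumed to be a started ControlPlane):
+//
+//	kubeCtl := cp.KubeCtl()
+//	stdout, stderr, err := kubeCtl.Run("get", "pods")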
+func (f *ControlPlane) KubeCtl() *KubeCtl {
+	k := &KubeCtl{}
+	k.Opts = append(k.Opts, fmt.Sprintf("--server=%s", f.APIURL()))
+	return k
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/doc.go b/vendor/sigs.k8s.io/testing_frameworks/integration/doc.go
new file mode 100644
index 000000000..01bc5ce55
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/doc.go
@@ -0,0 +1,113 @@
+/*
+
+Package integration implements an integration testing framework for kubernetes.
+
+It provides components for standing up a kubernetes API, against which you can test a
+kubernetes client, or other kubernetes components. The lifecycle of the components
+needed to provide this API is managed by this framework.
+
+Quickstart
+
+If you want to test a kubernetes client against the latest kubernetes APIServer
+and Etcd, you can use `./scripts/download-binaries.sh` to download APIServer
+and Etcd binaries for your platform. Then add something like the following to
+your tests:
+
+	cp := &integration.ControlPlane{}
+	cp.Start()
+	kubeCtl := cp.KubeCtl()
+	stdout, stderr, err := kubeCtl.Run("get", "pods")
+	// You can check on err, stdout & stderr and build up
+	// your tests
+	cp.Stop()
+
+Components
+
+Currently the framework provides the following components:
+
+ControlPlane: The ControlPlane wraps Etcd & APIServer (see below) and wires
+them together correctly. A ControlPlane can be stopped & started and can
+provide the URL to connect to the API. The ControlPlane can also be asked for a
+KubeCtl which is already correctly configured for this ControlPlane. The
+ControlPlane is a good entry point for default setups.
+
+Etcd: Manages an Etcd binary, which can be started, stopped and connected to.
+By default Etcd will listen on a random port for http connections and will
+create a temporary directory for its data. To configure it differently, see the
+Etcd type documentation below.
+
+APIServer: Manages a Kube-APIServer binary, which can be started, stopped and
+connected to. By default APIServer will listen on a random port for http
+connections and will create a temporary directory to store the (auto-generated)
+certificates. To configure it differently, see the APIServer type
+documentation below.
+
+KubeCtl: Wraps around a `kubectl` binary and can `Run(...)` arbitrary commands
+against a kubernetes control plane.
+
+Binaries
+
+Etcd, APIServer & KubeCtl use the same mechanism to determine which binaries to
+use when they get started.
+
+1. If the component is configured with a `Path` the framework tries to run that
+binary.
+For example:
+
+	myEtcd := &Etcd{
+		Path: "/some/other/etcd",
+	}
+	cp := &integration.ControlPlane{
+		Etcd: myEtcd,
+	}
+	cp.Start()
+
+2. If the Path field on APIServer, Etcd or KubeCtl is left unset and an
+environment variable named `TEST_ASSET_KUBE_APISERVER`, `TEST_ASSET_ETCD` or
+`TEST_ASSET_KUBECTL` is set, its value is used as a path to the binary for the
+APIServer, Etcd or KubeCtl.
+
+3. If neither the `Path` field, nor the environment variable is set, the
+framework tries to use the binaries `kube-apiserver`, `etcd` or `kubectl` in
+the directory `${FRAMEWORK_DIR}/assets/bin/`.
+
+For convenience this framework ships with
+`${FRAMEWORK_DIR}/scripts/download-binaries.sh` which can be used to download
+pre-compiled versions of the needed binaries and place them in the default
+location (`${FRAMEWORK_DIR}/assets/bin/`).
+
+Arguments for Etcd and APIServer
+
+Those components will start without any configuration.
+
+Arguments for Etcd and APIServer
+
+Those components will start without any configuration. However, if you want or
+need to, you can override certain configuration -- one of which is the set of
+arguments used when calling the binary.
+
+When you choose to specify your own set of arguments, those won't be appended
+to the default set of arguments; it is your responsibility to provide all the
+arguments needed for the binary to start successfully.
+
+All arguments are interpreted as go templates. Those templates have access to
+all exported fields of the `APIServer`/`Etcd` struct. It does not matter if
+those fields were explicitly set up or if they were defaulted by calling the
+`Start()` method; the template evaluation runs just before the binary is
+executed and right after the defaulting of all the struct's fields has
+happened.
+
+	// All arguments needed for a successful start must be specified
+	etcdArgs := []string{
+		"--listen-peer-urls=http://localhost:0",
+		"--advertise-client-urls={{ .URL.String }}",
+		"--listen-client-urls={{ .URL.String }}",
+		"--data-dir={{ .DataDir }}",
+		// add some custom arguments
+		"--this-is-my-very-important-custom-argument",
+		"--arguments-dont-have-to-be-templates=but they can",
+	}
+
+	etcd := &Etcd{
+		Args:    etcdArgs,
+		DataDir: "/my/special/data/dir",
+	}
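+
+As a further sketch along the same lines (the custom flag below is
+hypothetical; EtcdURL, CertDir and URL are real exported fields of the
+APIServer struct), the APIServer's arguments can be templated in the same way:
+
+	apiServerArgs := []string{
+		"--etcd-servers={{ .EtcdURL.String }}",
+		"--cert-dir={{ .CertDir }}",
+		"--insecure-port={{ .URL.Port }}",
+		"--my-hypothetical-custom-flag",
+	}
+
+	apiServer := &APIServer{
+		Args: apiServerArgs,
+	}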
+
+*/
+package integration
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go b/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go
new file mode 100644
index 000000000..7bfa758e2
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go
@@ -0,0 +1,102 @@
+package integration
+
+import (
+	"io"
+	"time"
+
+	"net/url"
+
+	"sigs.k8s.io/testing_frameworks/integration/internal"
+)
+
+// Etcd knows how to run an etcd server.
+type Etcd struct {
+	// URL is the address the Etcd should listen on for client connections.
+	//
+	// If this is not specified, we default to a random free port on localhost.
+	URL *url.URL
+
+	// Path is the path to the etcd binary.
+	//
+	// If this is left as the empty string, we will attempt to locate a binary,
+	// by checking for the TEST_ASSET_ETCD environment variable, and the default
+	// test assets directory. See the "Binaries" section above (in doc.go) for
+	// details.
+	Path string
+
+	// Args is a list of arguments which will be passed to the Etcd binary.
+	// Before they are passed on, they will be evaluated as go-template strings.
+	// This means you can use fields which are defined and exported on this Etcd
+	// struct (e.g. "--data-dir={{ .Dir }}").
+	// Those templates will be evaluated after the defaulting of the Etcd's
+	// fields has already happened and just before the binary actually gets
+	// started. Thus you have access to calculated fields like `URL` and others.
+	//
+	// If not specified, the minimal set of arguments to run the Etcd will be
+	// used.
+	Args []string
+
+	// DataDir is a path to a directory in which etcd can store its state.
+	//
+	// If left unspecified, then the Start() method will create a fresh temporary
+	// directory, and the Stop() method will clean it up.
+	DataDir string
+
+	// StartTimeout, StopTimeout specify the time the Etcd is allowed to
+	// take when starting and stopping before an error is emitted.
+	//
+	// If not specified, these default to 20 seconds.
+	StartTimeout time.Duration
+	StopTimeout  time.Duration
+
+	// Out, Err specify where Etcd should write its StdOut, StdErr to.
+	//
+	// If not specified, the output will be discarded.
+	Out io.Writer
+	Err io.Writer
+
+	processState *internal.ProcessState
+}
+
+// Start starts the etcd, waits for it to come up, and returns an error, if one
+// occurred.
+func (e *Etcd) Start() error {
+	var err error
+
+	e.processState = &internal.ProcessState{}
+
+	e.processState.DefaultedProcessInput, err = internal.DoDefaulting(
+		"etcd",
+		e.URL,
+		e.DataDir,
+		e.Path,
+		e.StartTimeout,
+		e.StopTimeout,
+	)
+	if err != nil {
+		return err
+	}
+
+	e.processState.StartMessage = internal.GetEtcdStartMessage(e.processState.URL)
+
+	e.URL = &e.processState.URL
+	e.DataDir = e.processState.Dir
+	e.Path = e.processState.Path
+	e.StartTimeout = e.processState.StartTimeout
+	e.StopTimeout = e.processState.StopTimeout
+
+	e.processState.Args, err = internal.RenderTemplates(
+		internal.DoEtcdArgDefaulting(e.Args), e,
+	)
+	if err != nil {
+		return err
+	}
+
+	return e.processState.Start(e.Out, e.Err)
+}
+
+// Stop stops this process gracefully, waits for its termination, and cleans up
+// the DataDir if necessary.
+func (e *Etcd) Stop() error {
+	return e.processState.Stop()
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/integration_suite_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/integration_suite_test.go
new file mode 100644
index 000000000..65e45f995
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/integration_suite_test.go
@@ -0,0 +1,14 @@
+package integration_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"testing"
+)
+
+func TestIntegration(t *testing.T) {
+	t.Parallel()
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Integration Framework Unit Tests")
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/address_manager.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/address_manager.go
new file mode 100644
index 000000000..7ad4ab7ea
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/address_manager.go
@@ -0,0 +1,53 @@
+package internal
+
+import (
+	"fmt"
+	"net"
+)
+
+// AddressManager allocates a new address (interface & port) a process
+// can bind to, and keeps track of it.
+type AddressManager struct {
+	port int
+	host string
+}
+
+// Initialize returns an address a process can listen on. It returns
+// a tuple consisting of a free port and the hostname resolved to its IP.
+func (d *AddressManager) Initialize() (port int, resolvedHost string, err error) {
+	if d.port != 0 {
+		return 0, "", fmt.Errorf("this AddressManager is already initialized")
+	}
+	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+	if err != nil {
+		return
+	}
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return
+	}
+	d.port = l.Addr().(*net.TCPAddr).Port
+	defer func() {
+		err = l.Close()
+	}()
+	d.host = addr.IP.String()
+	return d.port, d.host, nil
+}
+
+// Port returns the port that this AddressManager is managing. Port returns an
+// error if this AddressManager has not yet been initialized.
+func (d *AddressManager) Port() (int, error) {
+	if d.port == 0 {
+		return 0, fmt.Errorf("this AddressManager is not initialized yet")
+	}
+	return d.port, nil
+}
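+
+// A minimal usage sketch (not exercised anywhere in this package): callers
+// typically combine the two return values into a host:port address, as
+// DoDefaulting in this package does:
+//
+//	am := &AddressManager{}
+//	port, host, err := am.Initialize()
+//	// handle err, then:
+//	hostAndPort := fmt.Sprintf("%s:%d", host, port)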
+
+// Host returns the host that this AddressManager is managing. Host returns an
+// error if this AddressManager has not yet been initialized.
+func (d *AddressManager) Host() (string, error) {
+	if d.host == "" {
+		return "", fmt.Errorf("this AddressManager is not initialized yet")
+	}
+	return d.host, nil
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/address_manager_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/address_manager_test.go
new file mode 100644
index 000000000..c6bb3a33d
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/address_manager_test.go
@@ -0,0 +1,71 @@
+package internal_test
+
+import (
+	. "sigs.k8s.io/testing_frameworks/integration/internal"
+
+	"fmt"
+	"net"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("AddressManager", func() {
+	var addressManager *AddressManager
+	BeforeEach(func() {
+		addressManager = &AddressManager{}
+	})
+
+	Describe("Initialize", func() {
+		It("returns a free port and an address to bind to", func() {
+			port, host, err := addressManager.Initialize()
+
+			Expect(err).NotTo(HaveOccurred())
+			Expect(host).To(Equal("127.0.0.1"))
+			Expect(port).NotTo(Equal(0))
+
+			addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:%d", host, port))
+			Expect(err).NotTo(HaveOccurred())
+			l, err := net.ListenTCP("tcp", addr)
+			defer func() {
+				Expect(l.Close()).To(Succeed())
+			}()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		Context("initialized multiple times", func() {
+			It("fails", func() {
+				_, _, err := addressManager.Initialize()
+				Expect(err).NotTo(HaveOccurred())
+				_, _, err = addressManager.Initialize()
+				Expect(err).To(MatchError(ContainSubstring("already initialized")))
+			})
+		})
+	})
+	Describe("Port", func() {
+		It("returns an error if Initialize has not been called yet", func() {
+			_, err := addressManager.Port()
+			Expect(err).To(MatchError(ContainSubstring("not initialized yet")))
+		})
+		It("returns the same port as previously allocated by Initialize", func() {
+			expectedPort, _, err := addressManager.Initialize()
+			Expect(err).NotTo(HaveOccurred())
+			actualPort, err := addressManager.Port()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(actualPort).To(Equal(expectedPort))
+		})
+	})
+	Describe("Host", func() {
+		It("returns an error if Initialize has not been called yet", func() {
+			_, err := addressManager.Host()
+			Expect(err).To(MatchError(ContainSubstring("not initialized yet")))
+		})
+		It("returns the same host as previously allocated by Initialize", func() {
+			_, expectedHost, err := addressManager.Initialize()
+			Expect(err).NotTo(HaveOccurred())
+			actualHost, err := addressManager.Host()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(actualHost).To(Equal(expectedHost))
+		})
+	})
+})
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/apiserver.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/apiserver.go
new file mode 100644
index 000000000..a5256aee0
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/apiserver.go
@@ -0,0 +1,17 @@
+package internal
+
+var APIServerDefaultArgs = []string{
+	"--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}",
+	"--cert-dir={{ .CertDir }}",
+	"--insecure-port={{ if .URL }}{{ .URL.Port }}{{ end }}",
+	"--insecure-bind-address={{ if .URL }}{{ .URL.Hostname }}{{ end }}",
+	"--secure-port=0",
+}
+
+func DoAPIServerArgDefaulting(args []string) []string {
+	if len(args) != 0 {
+		return args
+	}
+
+	return APIServerDefaultArgs
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/apiserver_test.go 
b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/apiserver_test.go new file mode 100644 index 000000000..f82896a9e --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/apiserver_test.go @@ -0,0 +1,23 @@ +package internal_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "sigs.k8s.io/testing_frameworks/integration/internal" +) + +var _ = Describe("Apiserver", func() { + It("defaults Args if they are empty", func() { + initialArgs := []string{} + defaultedArgs := DoAPIServerArgDefaulting(initialArgs) + Expect(defaultedArgs).To(BeEquivalentTo(APIServerDefaultArgs)) + }) + + It("keeps Args as is if they are not empty", func() { + initialArgs := []string{"--one", "--two=2"} + defaultedArgs := DoAPIServerArgDefaulting(initialArgs) + Expect(defaultedArgs).To(BeEquivalentTo([]string{ + "--one", "--two=2", + })) + }) +}) diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/arguments.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/arguments.go new file mode 100644 index 000000000..00fe7935a --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/arguments.go @@ -0,0 +1,28 @@ +package internal + +import ( + "bytes" + "html/template" +) + +func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { + var t *template.Template + + for _, arg := range argTemplates { + t, err = template.New(arg).Parse(arg) + if err != nil { + args = nil + return + } + + buf := &bytes.Buffer{} + err = t.Execute(buf, data) + if err != nil { + args = nil + return + } + args = append(args, buf.String()) + } + + return +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/arguments_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/arguments_test.go new file mode 100644 index 000000000..25220a461 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/arguments_test.go @@ -0,0 +1,83 @@ +package internal_test + +import ( + "net/url" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/testing_frameworks/integration/internal" +) + +var _ = Describe("Arguments", func() { + It("templates URLs", func() { + templates := []string{ + "plain URL: {{ .SomeURL }}", + "method on URL: {{ .SomeURL.Hostname }}", + "empty URL: {{ .EmptyURL }}", + "handled empty URL: {{- if .EmptyURL }}{{ .EmptyURL }}{{ end }}", + } + data := struct { + SomeURL *url.URL + EmptyURL *url.URL + }{ + &url.URL{Scheme: "https", Host: "the.host.name:3456"}, + nil, + } + + out, err := RenderTemplates(templates, data) + Expect(err).NotTo(HaveOccurred()) + Expect(out).To(BeEquivalentTo([]string{ + "plain URL: https://the.host.name:3456", + "method on URL: the.host.name", + "empty URL: <nil>", + "handled empty URL:", + })) + }) + + It("templates strings", func() { + templates := []string{ + "a string: {{ .SomeString }}", + "empty string: {{- .EmptyString }}", + } + data := struct { + SomeString string + EmptyString string + }{ + "this is some random string", + "", + } + + out, err := RenderTemplates(templates, data) + Expect(err).NotTo(HaveOccurred()) + Expect(out).To(BeEquivalentTo([]string{ + "a string: this is some random string", + "empty string:", + })) + }) + + It("has no access to unexported fields", func() { + templates := []string{ + "this is just a string", + "this blows up {{ .test }}", + } + data := struct{ test string }{"ooops private"} + + out, err := RenderTemplates(templates, data) + Expect(out).To(BeEmpty()) + Expect(err).To(MatchError( + ContainSubstring("is an unexported field of struct"), + )) + }) + + It("errors when field cannot be found", func() { + templates := []string{"this does {{ .NotExist }}"} + data := struct{ Unused string }{"unused"} + + out, err := RenderTemplates(templates, data) + Expect(out).To(BeEmpty()) + Expect(err).To(MatchError( + ContainSubstring("can't evaluate field"), + )) + }) +}) diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/bin_path_finder.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/bin_path_finder.go new file mode 100644 index 000000000..5597e4ba0 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/bin_path_finder.go @@ -0,0 +1,35 @@ +package internal + +import ( + "os" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +var assetsPath string + +func init() { + _, thisFile, _, ok := runtime.Caller(0) + if !ok { + panic("Could not determine the path of the BinPathFinder") + } + assetsPath = filepath.Join(filepath.Dir(thisFile), "..", "assets", "bin") +} + +// BinPathFinder checks the an environment variable, derived from the symbolic name, +// and falls back to a default assets location when this variable is not set +func BinPathFinder(symbolicName string) (binPath string) { + punctuationPattern := regexp.MustCompile("[^A-Z0-9]+") + sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_") + leadingNumberPattern := regexp.MustCompile("^[0-9]+") + sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "") + envVar := "TEST_ASSET_" + sanitizedName + + if val, ok := os.LookupEnv(envVar); ok { + return val + } + + return filepath.Join(assetsPath, symbolicName) +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/bin_path_finder_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/bin_path_finder_test.go new file mode 100644 index 000000000..490b9e5b5 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/bin_path_finder_test.go @@ -0,0 +1,66 @@ +package internal + +import ( 
+ "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("BinPathFinder", func() { + Context("when relying on the default assets path", func() { + var ( + previousAssetsPath string + ) + BeforeEach(func() { + previousAssetsPath = assetsPath + assetsPath = "/some/path/assets/bin" + }) + AfterEach(func() { + assetsPath = previousAssetsPath + }) + It("returns the default path when no env var is configured", func() { + binPath := BinPathFinder("some_bin") + Expect(binPath).To(Equal("/some/path/assets/bin/some_bin")) + }) + }) + + Context("when environment is configured", func() { + var ( + previousValue string + wasSet bool + ) + BeforeEach(func() { + envVarName := "TEST_ASSET_ANOTHER_SYMBOLIC_NAME" + if val, ok := os.LookupEnv(envVarName); ok { + previousValue = val + wasSet = true + } + os.Setenv(envVarName, "/path/to/some_bin.exe") + }) + AfterEach(func() { + if wasSet { + os.Setenv("TEST_ASSET_ANOTHER_SYMBOLIC_NAME", previousValue) + } else { + os.Unsetenv("TEST_ASSET_ANOTHER_SYMBOLIC_NAME") + } + }) + It("returns the path from the env", func() { + binPath := BinPathFinder("another_symbolic_name") + Expect(binPath).To(Equal("/path/to/some_bin.exe")) + }) + + It("sanitizes the environment variable name", func() { + By("cleaning all non-underscore punctuation") + binPath := BinPathFinder("another-symbolic name") + Expect(binPath).To(Equal("/path/to/some_bin.exe")) + binPath = BinPathFinder("another+symbolic\\name") + Expect(binPath).To(Equal("/path/to/some_bin.exe")) + binPath = BinPathFinder("another=symbolic.name") + Expect(binPath).To(Equal("/path/to/some_bin.exe")) + By("removing numbers from the beginning of the name") + binPath = BinPathFinder("12another_symbolic_name") + Expect(binPath).To(Equal("/path/to/some_bin.exe")) + }) + }) +}) diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd.go new file mode 100644 index 000000000..4c948cfd1 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd.go @@ -0,0 +1,36 @@ +package internal + +import "net/url" + +var EtcdDefaultArgs = []string{ + "--listen-peer-urls=http://localhost:0", + "--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--listen-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--data-dir={{ .DataDir }}", +} + +func DoEtcdArgDefaulting(args []string) []string { + if len(args) != 0 { + return args + } + + return EtcdDefaultArgs +} + +func isSecureScheme(scheme string) bool { + // https://github.com/coreos/etcd/blob/d9deeff49a080a88c982d328ad9d33f26d1ad7b6/pkg/transport/listener.go#L53 + if scheme == "https" || scheme == "unixs" { + return true + } + return false +} + +func GetEtcdStartMessage(listenUrl url.URL) string { + if isSecureScheme(listenUrl.Scheme) { + // https://github.com/coreos/etcd/blob/a7f1fbe00ec216fcb3a1919397a103b41dca8413/embed/serve.go#L167 + return "serving client requests on " + listenUrl.Hostname() + } + + // https://github.com/coreos/etcd/blob/a7f1fbe00ec216fcb3a1919397a103b41dca8413/embed/serve.go#L124 + return "serving insecure client requests on " + listenUrl.Hostname() +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd_test.go new file mode 100644 index 000000000..449b1ac6c --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd_test.go @@ -0,0 +1,49 @@ +package internal_test + +import ( + 
"net/url" + + . "sigs.k8s.io/testing_frameworks/integration/internal" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Etcd", func() { + It("defaults Args if they are empty", func() { + initialArgs := []string{} + defaultedArgs := DoEtcdArgDefaulting(initialArgs) + Expect(defaultedArgs).To(BeEquivalentTo(EtcdDefaultArgs)) + }) + + It("keeps Args as is if they are not empty", func() { + initialArgs := []string{"--eins", "--zwei=2"} + defaultedArgs := DoEtcdArgDefaulting(initialArgs) + Expect(defaultedArgs).To(BeEquivalentTo([]string{ + "--eins", "--zwei=2", + })) + }) +}) + +var _ = Describe("GetEtcdStartMessage()", func() { + Context("when using a non tls URL", func() { + It("generates valid start message", func() { + url := url.URL{ + Scheme: "http", + Host: "some.insecure.host:1234", + } + message := GetEtcdStartMessage(url) + Expect(message).To(Equal("serving insecure client requests on some.insecure.host")) + }) + }) + Context("when using a tls URL", func() { + It("generates valid start message", func() { + url := url.URL{ + Scheme: "https", + Host: "some.secure.host:8443", + } + message := GetEtcdStartMessage(url) + Expect(message).To(Equal("serving client requests on some.secure.host")) + }) + }) +}) diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/apiserver_integration_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/apiserver_integration_test.go new file mode 100644 index 000000000..9412f37ea --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/apiserver_integration_test.go @@ -0,0 +1,24 @@ +package integration_tests + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "sigs.k8s.io/testing_frameworks/integration" +) + +var _ = Describe("APIServer", func() { + Context("when no EtcdURL is provided", func() { + It("does not panic", func() { + apiServer := &APIServer{} + + starter := func() { + Expect(apiServer.Start()).To( + MatchError(ContainSubstring("expected EtcdURL to be configured")), + ) + } + + Expect(starter).NotTo(Panic()) + }) + }) +}) diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/doc.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/doc.go new file mode 100644 index 000000000..dec5a9197 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/doc.go @@ -0,0 +1,7 @@ +/* +Package integrarion_tests is holding the integration tests to run against the +framework. + +This file's only purpose is to make godep happy. +*/ +package integration_tests diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/etcd_integration_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/etcd_integration_test.go new file mode 100644 index 000000000..c02e73dc6 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/etcd_integration_test.go @@ -0,0 +1,66 @@ +package integration_tests + +import ( + "bytes" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/testing_frameworks/integration" +) + +var _ = Describe("Etcd", func() { + It("sets the properties after defaulting", func() { + etcd := &Etcd{} + + Expect(etcd.URL).To(BeZero()) + Expect(etcd.DataDir).To(BeZero()) + Expect(etcd.Path).To(BeZero()) + Expect(etcd.StartTimeout).To(BeZero()) + Expect(etcd.StopTimeout).To(BeZero()) + + Expect(etcd.Start()).To(Succeed()) + defer func() { + Expect(etcd.Stop()).To(Succeed()) + }() + + Expect(etcd.URL).NotTo(BeZero()) + Expect(etcd.DataDir).NotTo(BeZero()) + Expect(etcd.Path).NotTo(BeZero()) + Expect(etcd.StartTimeout).NotTo(BeZero()) + Expect(etcd.StopTimeout).NotTo(BeZero()) + }) + + It("can inspect IO", func() { + stderr := &bytes.Buffer{} + etcd := &Etcd{ + Err: stderr, + } + + Expect(etcd.Start()).To(Succeed()) + defer func() { + Expect(etcd.Stop()).To(Succeed()) + }() + + Expect(stderr.String()).NotTo(BeEmpty()) + }) + + It("can use user specified Args", func() { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + etcd := &Etcd{ + Args: []string{"--help"}, + Out: stdout, + Err: stderr, + StartTimeout: 500 * time.Millisecond, + } + + // it will timeout, as we'll never see the "startup message" we are waiting + // for on StdErr + Expect(etcd.Start()).To(MatchError(ContainSubstring("timeout"))) + + Expect(stdout.String()).To(ContainSubstring("member flags")) + Expect(stderr.String()).To(ContainSubstring("usage: etcd")) + }) +}) diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/integration_suite_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/integration_suite_test.go new file mode 100644 index 000000000..d61123839 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/integration_suite_test.go @@ -0,0 +1,14 @@ +package integration_tests + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestIntegration(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + RunSpecs(t, "Integration Framework Integration Tests") +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/integration_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/integration_test.go new file mode 100644 index 000000000..3a4eacb8c --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/integration_tests/integration_test.go @@ -0,0 +1,160 @@ +package integration_tests + +import ( + "fmt" + "io/ioutil" + "net" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/testing_frameworks/integration" +) + +var _ = Describe("The Testing Framework", func() { + var controlPlane *integration.ControlPlane + + AfterEach(func() { + Expect(controlPlane.Stop()).To(Succeed()) + }) + + It("Successfully manages the control plane lifecycle", func() { + var err error + + controlPlane = &integration.ControlPlane{} + + By("Starting all the control plane processes") + err = controlPlane.Start() + Expect(err).NotTo(HaveOccurred(), "Expected controlPlane to start successfully") + + apiServerURL := controlPlane.APIURL() + etcdClientURL := controlPlane.APIServer.EtcdURL + + isEtcdListeningForClients := isSomethingListeningOnPort(etcdClientURL.Host) + isAPIServerListening := isSomethingListeningOnPort(apiServerURL.Host) + + By("Ensuring Etcd is listening") + Expect(isEtcdListeningForClients()).To(BeTrue(), + fmt.Sprintf("Expected Etcd to listen for clients on %s,", etcdClientURL.Host)) + + By("Ensuring APIServer is listening") + CheckAPIServerIsReady(controlPlane.KubeCtl()) + + By("getting a kubectl & run it against the control plane") + kubeCtl := controlPlane.KubeCtl() + stdout, stderr, err := kubeCtl.Run("get", "pods") + Expect(err).NotTo(HaveOccurred()) + bytes, err := ioutil.ReadAll(stdout) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).To(BeEmpty()) + Expect(stderr).To(ContainSubstring("No resources found.")) + + By("Stopping all the control plane processes") + err = controlPlane.Stop() + Expect(err).NotTo(HaveOccurred(), "Expected controlPlane to stop successfully") + + By("Ensuring Etcd is not listening anymore") + Expect(isEtcdListeningForClients()).To(BeFalse(), "Expected Etcd not to listen for clients anymore") + + By("Ensuring APIServer is not listening anymore") + Expect(isAPIServerListening()).To(BeFalse(), "Expected APIServer not to listen anymore") + + By("Not erroring when stopping a stopped ControlPlane") + Expect(func() { + Expect(controlPlane.Stop()).To(Succeed()) + }).NotTo(Panic()) + }) + + Context("when Stop() is called on the control plane", func() { + Context("but the control plane is not started yet", func() { + It("does not error", func() { + controlPlane = &integration.ControlPlane{} + + stoppingTheControlPlane := func() { + Expect(controlPlane.Stop()).To(Succeed()) + } + + Expect(stoppingTheControlPlane).NotTo(Panic()) + }) + }) + }) + + Context("when the control plane is configured with its components", func() { + It("it does not default them", func() { + myEtcd, myAPIServer := + &integration.Etcd{StartTimeout: 15 * time.Second}, + &integration.APIServer{StopTimeout: 16 * time.Second} + + controlPlane = &integration.ControlPlane{ + Etcd: myEtcd, + APIServer: myAPIServer, + } + + Expect(controlPlane.Start()).To(Succeed()) + Expect(controlPlane.Etcd).To(BeIdenticalTo(myEtcd)) + Expect(controlPlane.APIServer).To(BeIdenticalTo(myAPIServer)) + Expect(controlPlane.Etcd.StartTimeout).To(Equal(15 * time.Second)) + Expect(controlPlane.APIServer.StopTimeout).To(Equal(16 * time.Second)) + }) + }) + + Measure("It should be fast to bring up and tear down the control plane", func(b Benchmarker) { + b.Time("lifecycle", func() { + controlPlane = &integration.ControlPlane{} + + controlPlane.Start() + controlPlane.Stop() + }) + }, 10) +}) + +type portChecker func() bool + +func isSomethingListeningOnPort(hostAndPort string) portChecker { + return func() bool { + conn, err := net.DialTimeout("tcp", hostAndPort, 1*time.Second) + + if err != nil { + return false + } + conn.Close() + return true + } +} + +// 
+
+// CheckAPIServerIsReady checks if the APIServer is really ready and not only
+// listening.
+//
+// While porting some tests in k/k
+// (https://github.com/hoegaarden/kubernetes/blob/287fdef1bd98646bc521f4433c1009936d5cf7a2/hack/make-rules/test-cmd-util.sh#L1524-L1535)
+// we found that the APIServer was listening but not yet serving certain APIs.
+//
+// We changed the readiness detection in the PR at
+// https://github.com/kubernetes-sigs/testing_frameworks/pull/48. To confirm
+// this changed behaviour does what it should do, we used the same test as in
+// k/k's test-cmd (see link above) and test whether certain well-known APIs
+// are actually available.
+func CheckAPIServerIsReady(kubeCtl *integration.KubeCtl) {
+	expectedAPIS := []string{
+		"/api/v1/namespaces/default/pods 200 OK",
+		"/api/v1/namespaces/default/replicationcontrollers 200 OK",
+		"/api/v1/namespaces/default/services 200 OK",
+		"/apis/apps/v1/namespaces/default/daemonsets 200 OK",
+		"/apis/apps/v1/namespaces/default/deployments 200 OK",
+		"/apis/apps/v1/namespaces/default/replicasets 200 OK",
+		"/apis/apps/v1/namespaces/default/statefulsets 200 OK",
+		"/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200",
+		"/apis/batch/v1/namespaces/default/jobs 200 OK",
+	}
+
+	_, output, err := kubeCtl.Run("--v=6", "--namespace", "default", "get", "all", "--chunk-size=0")
+	ExpectWithOffset(1, err).NotTo(HaveOccurred())
+
+	stdoutBytes, err := ioutil.ReadAll(output)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred())
+
+	for _, api := range expectedAPIS {
+		ExpectWithOffset(1, string(stdoutBytes)).To(ContainSubstring(api))
+	}
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/internal_suite_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/internal_suite_test.go
new file mode 100644
index 000000000..d673cc4c9
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/internal_suite_test.go
@@ -0,0 +1,14 @@
+package internal_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"testing"
+)
+
+func TestInternal(t *testing.T) {
+	t.Parallel()
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Internal Suite")
+}
diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go
new file mode 100644
index 000000000..620c6a99c
--- /dev/null
+++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go
@@ -0,0 +1,202 @@
+package internal
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"path"
+	"time"
+
+	"github.com/onsi/gomega/gbytes"
+	"github.com/onsi/gomega/gexec"
+)
+
+type ProcessState struct {
+	DefaultedProcessInput
+	Session *gexec.Session
+	// Healthcheck Endpoint. If we get http.StatusOK from this endpoint, we
+	// assume the process is ready to operate. E.g. "/healthz". If this is set,
+	// we ignore StartMessage.
+	HealthCheckEndpoint string
+	// HealthCheckPollInterval is the interval which will be used for polling the
+	// HealthCheckEndpoint.
+	// If left empty it will default to 100 milliseconds.
+	HealthCheckPollInterval time.Duration
+	// StartMessage is the message to wait for on stderr. If we receive this
+	// message, we assume the process is ready to operate. Ignored if
+	// HealthCheckEndpoint is specified.
+	//
+	// The usage of StartMessage is discouraged, favour HealthCheckEndpoint
+	// instead!
+ // + // Deprecated: Use HealthCheckEndpoint in favour of StartMessage + StartMessage string + Args []string +} + +type DefaultedProcessInput struct { + URL url.URL + Dir string + DirNeedsCleaning bool + Path string + StopTimeout time.Duration + StartTimeout time.Duration +} + +func DoDefaulting( + name string, + listenUrl *url.URL, + dir string, + path string, + startTimeout time.Duration, + stopTimeout time.Duration, +) (DefaultedProcessInput, error) { + defaults := DefaultedProcessInput{ + Dir: dir, + Path: path, + StartTimeout: startTimeout, + StopTimeout: stopTimeout, + } + + if listenUrl == nil { + am := &AddressManager{} + port, host, err := am.Initialize() + if err != nil { + return DefaultedProcessInput{}, err + } + defaults.URL = url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", host, port), + } + } else { + defaults.URL = *listenUrl + } + + if dir == "" { + newDir, err := ioutil.TempDir("", "k8s_test_framework_") + if err != nil { + return DefaultedProcessInput{}, err + } + defaults.Dir = newDir + defaults.DirNeedsCleaning = true + } + + if path == "" { + if name == "" { + return DefaultedProcessInput{}, fmt.Errorf("must have at least one of name or path") + } + defaults.Path = BinPathFinder(name) + } + + if startTimeout == 0 { + defaults.StartTimeout = 20 * time.Second + } + + if stopTimeout == 0 { + defaults.StopTimeout = 20 * time.Second + } + + return defaults, nil +} + +type stopChannel chan struct{} + +func (ps *ProcessState) Start(stdout, stderr io.Writer) (err error) { + command := exec.Command(ps.Path, ps.Args...) + + ready := make(chan bool) + timedOut := time.After(ps.StartTimeout) + var pollerStopCh stopChannel + + if ps.HealthCheckEndpoint != "" { + healthCheckURL := ps.URL + healthCheckURL.Path = ps.HealthCheckEndpoint + pollerStopCh = make(stopChannel) + go pollURLUntilOK(healthCheckURL, ps.HealthCheckPollInterval, ready, pollerStopCh) + } else { + startDetectStream := gbytes.NewBuffer() + ready = startDetectStream.Detect(ps.StartMessage) + stderr = safeMultiWriter(stderr, startDetectStream) + } + + ps.Session, err = gexec.Start(command, stdout, stderr) + if err != nil { + return err + } + + select { + case <-ready: + return nil + case <-timedOut: + if pollerStopCh != nil { + close(pollerStopCh) + } + if ps.Session != nil { + ps.Session.Terminate() + } + return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path)) + } +} + +func safeMultiWriter(writers ...io.Writer) io.Writer { + safeWriters := []io.Writer{} + for _, w := range writers { + if w != nil { + safeWriters = append(safeWriters, w) + } + } + return io.MultiWriter(safeWriters...) +} + +func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh stopChannel) { + if interval <= 0 { + interval = 100 * time.Millisecond + } + for { + res, err := http.Get(url.String()) + if err == nil && res.StatusCode == http.StatusOK { + ready <- true + return + } + + select { + case <-stopCh: + return + default: + time.Sleep(interval) + } + } +} + +func (ps *ProcessState) Stop() error { + if ps.Session == nil { + return nil + } + + // gexec's Session methods (Signal, Kill, ...) do not check if the Process is + // nil, so we are doing this here for now. + // This should probably be fixed in gexec. 
+ if ps.Session.Command.Process == nil { + return nil + } + + detectedStop := ps.Session.Terminate().Exited + timedOut := time.After(ps.StopTimeout) + + select { + case <-detectedStop: + break + case <-timedOut: + return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) + } + + if ps.DirNeedsCleaning { + return os.RemoveAll(ps.Dir) + } + + return nil +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process_test.go new file mode 100644 index 000000000..91dfc888d --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process_test.go @@ -0,0 +1,368 @@ +package internal_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + "github.com/onsi/gomega/ghttp" + . "sigs.k8s.io/testing_frameworks/integration/internal" +) + +var _ = Describe("Start method", func() { + var ( + processState *ProcessState + ) + BeforeEach(func() { + processState = &ProcessState{} + processState.Path = "bash" + processState.Args = simpleBashScript + }) + + It("can start a process", func() { + processState.StartTimeout = 10 * time.Second + processState.StartMessage = "loop 5" + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + + Consistently(processState.Session.ExitCode).Should(BeNumerically("==", -1)) + }) + + Context("when a health check endpoint is provided", func() { + var server *ghttp.Server + BeforeEach(func() { + server = ghttp.NewServer() + }) + AfterEach(func() { + server.Close() + }) + + Context("when the healthcheck returns ok", func() { + BeforeEach(func() { + server.RouteToHandler("GET", "/healthz", ghttp.RespondWith(http.StatusOK, "")) + }) + + It("hits the endpoint, and successfully starts", func() { + processState.HealthCheckEndpoint = "/healthz" + processState.StartTimeout = 100 * time.Millisecond + processState.URL = getServerURL(server) + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + Expect(server.ReceivedRequests()).To(HaveLen(1)) + Consistently(processState.Session.ExitCode).Should(BeNumerically("==", -1)) + }) + }) + + Context("when the healthcheck always returns failure", func() { + BeforeEach(func() { + server.RouteToHandler("GET", "/healthz", ghttp.RespondWith(http.StatusInternalServerError, "")) + }) + It("returns a timeout error and stops health API checker", func() { + processState.HealthCheckEndpoint = "/healthz" + processState.StartTimeout = 500 * time.Millisecond + processState.URL = getServerURL(server) + + err := processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + + nrReceivedRequests := len(server.ReceivedRequests()) + Expect(nrReceivedRequests).To(Equal(5)) + time.Sleep(200 * time.Millisecond) + Expect(nrReceivedRequests).To(Equal(5)) + }) + }) + + Context("when the healthcheck isn't even listening", func() { + BeforeEach(func() { + server.Close() + }) + + It("returns a timeout error", func() { + processState.HealthCheckEndpoint = "/healthz" + processState.StartTimeout = 500 * time.Millisecond + + am := &AddressManager{} + port, host, err := am.Initialize() + Expect(err).NotTo(HaveOccurred()) + + processState.URL = url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", host, port), + } + + err = processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + }) + }) + + Context("when 
the healthcheck fails initially but succeeds eventually", func() {
+			BeforeEach(func() {
+				server.AppendHandlers(
+					ghttp.RespondWith(http.StatusInternalServerError, ""),
+					ghttp.RespondWith(http.StatusInternalServerError, ""),
+					ghttp.RespondWith(http.StatusInternalServerError, ""),
+					ghttp.RespondWith(http.StatusOK, ""),
+				)
+			})
+
+			It("hits the endpoint repeatedly, and successfully starts", func() {
+				processState.HealthCheckEndpoint = "/healthz"
+				processState.StartTimeout = 20 * time.Second
+				processState.URL = getServerURL(server)
+
+				err := processState.Start(nil, nil)
+				Expect(err).NotTo(HaveOccurred())
+				Expect(server.ReceivedRequests()).To(HaveLen(4))
+				Consistently(processState.Session.ExitCode).Should(BeNumerically("==", -1))
+			})
+
+			Context("when the polling interval is not configured", func() {
+				It("uses the default interval for polling", func() {
+					processState.HealthCheckEndpoint = "/healthz"
+					processState.StartTimeout = 300 * time.Millisecond
+					processState.URL = getServerURL(server)
+
+					Expect(processState.Start(nil, nil)).To(MatchError(ContainSubstring("timeout")))
+					Expect(server.ReceivedRequests()).To(HaveLen(3))
+				})
+			})
+
+			Context("when the polling interval is configured", func() {
+				BeforeEach(func() {
+					processState.HealthCheckPollInterval = time.Millisecond * 20
+				})
+
+				It("hits the endpoint in the configured interval", func() {
+					processState.HealthCheckEndpoint = "/healthz"
+					processState.StartTimeout = 3 * processState.HealthCheckPollInterval
+					processState.URL = getServerURL(server)
+
+					Expect(processState.Start(nil, nil)).To(MatchError(ContainSubstring("timeout")))
+					Expect(server.ReceivedRequests()).To(HaveLen(3))
+				})
+			})
+		})
+	})
+
+	Context("when a health check endpoint is not provided", func() {
+
+		Context("when process takes too long to start", func() {
+			It("returns a timeout error", func() {
+				processState.StartTimeout = 200 * time.Millisecond
+				processState.StartMessage = "loop 5000"
+
+				err := processState.Start(nil, nil)
+				Expect(err).To(MatchError(ContainSubstring("timeout")))
+
+				Eventually(processState.Session.ExitCode).Should(Equal(143))
+			})
+		})
+
+		Context("when the command cannot be started", func() {
+			BeforeEach(func() {
+				processState = &ProcessState{}
+				processState.Path = "/nonexistent"
+			})
+
+			It("propagates the error", func() {
+				err := processState.Start(nil, nil)
+
+				Expect(os.IsNotExist(err)).To(BeTrue())
+			})
+
+			Context("but Stop() is called on it", func() {
+				It("does not panic", func() {
+					processState.Start(nil, nil)
+
+					stoppingFailedProcess := func() {
+						Expect(processState.Stop()).To(Succeed())
+					}
+
+					Expect(stoppingFailedProcess).NotTo(Panic())
+				})
+			})
+		})
+	})
+
+	Context("when IO is configured", func() {
+		It("can inspect stdout & stderr", func() {
+			stdout := &bytes.Buffer{}
+			stderr := &bytes.Buffer{}
+
+			processState.Args = []string{
+				"-c",
+				`
+				echo 'this is stderr' >&2
+				echo 'that is stdout'
+				echo 'i started' >&2
+				`,
+			}
+			processState.StartMessage = "i started"
+			processState.StartTimeout = 1 * time.Second
+
+			Expect(processState.Start(stdout, stderr)).To(Succeed())
+
+			Expect(stdout.String()).To(Equal("that is stdout\n"))
+			Expect(stderr.String()).To(Equal("this is stderr\ni started\n"))
+		})
+	})
+})
+
+var _ = Describe("Stop method", func() {
+	Context("when Stop() is called", func() {
+		var (
+			processState *ProcessState
+		)
+		BeforeEach(func() {
+			var err error
+			processState = &ProcessState{}
+			processState.Session, err = gexec.Start(getSimpleCommand(), nil, nil)
+			
Expect(err).NotTo(HaveOccurred()) + processState.StopTimeout = 10 * time.Second + }) + + It("stops the process", func() { + Expect(processState.Stop()).To(Succeed()) + }) + + Context("multiple times", func() { + It("does not error or panic on consecutive calls", func() { + stoppingTheProcess := func() { + Expect(processState.Stop()).To(Succeed()) + } + Expect(stoppingTheProcess).NotTo(Panic()) + Expect(stoppingTheProcess).NotTo(Panic()) + Expect(stoppingTheProcess).NotTo(Panic()) + }) + }) + }) + + Context("when the command cannot be stopped", func() { + It("returns a timeout error", func() { + var err error + + processState := &ProcessState{} + processState.Session, err = gexec.Start(getSimpleCommand(), nil, nil) + Expect(err).NotTo(HaveOccurred()) + processState.Session.Exited = make(chan struct{}) + processState.StopTimeout = 200 * time.Millisecond + + Expect(processState.Stop()).To(MatchError(ContainSubstring("timeout"))) + }) + }) + + Context("when the directory needs to be cleaned up", func() { + It("removes the directory", func() { + var err error + + processState := &ProcessState{} + processState.Session, err = gexec.Start(getSimpleCommand(), nil, nil) + Expect(err).NotTo(HaveOccurred()) + processState.Dir, err = ioutil.TempDir("", "k8s_test_framework_") + Expect(err).NotTo(HaveOccurred()) + processState.DirNeedsCleaning = true + processState.StopTimeout = 400 * time.Millisecond + + Expect(processState.Stop()).To(Succeed()) + Expect(processState.Dir).NotTo(BeAnExistingFile()) + }) + }) +}) + +var _ = Describe("DoDefaulting", func() { + Context("when all inputs are provided", func() { + It("passes them through", func() { + defaults, err := DoDefaulting( + "some name", + &url.URL{Host: "some.host.to.listen.on"}, + "/some/dir", + "/some/path/to/some/bin", + 20*time.Hour, + 65537*time.Millisecond, + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(defaults.URL).To(Equal(url.URL{Host: "some.host.to.listen.on"})) + Expect(defaults.Dir).To(Equal("/some/dir")) + Expect(defaults.DirNeedsCleaning).To(BeFalse()) + Expect(defaults.Path).To(Equal("/some/path/to/some/bin")) + Expect(defaults.StartTimeout).To(Equal(20 * time.Hour)) + Expect(defaults.StopTimeout).To(Equal(65537 * time.Millisecond)) + }) + }) + + Context("when inputs are empty", func() { + It("defaults them", func() { + defaults, err := DoDefaulting( + "some name", + nil, + "", + "", + 0, + 0, + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(defaults.Dir).To(BeADirectory()) + Expect(os.RemoveAll(defaults.Dir)).To(Succeed()) + Expect(defaults.DirNeedsCleaning).To(BeTrue()) + + Expect(defaults.URL).NotTo(BeZero()) + Expect(defaults.URL.Scheme).To(Equal("http")) + Expect(defaults.URL.Hostname()).NotTo(BeEmpty()) + Expect(defaults.URL.Port()).NotTo(BeEmpty()) + + Expect(defaults.Path).NotTo(BeEmpty()) + + Expect(defaults.StartTimeout).NotTo(BeZero()) + Expect(defaults.StopTimeout).NotTo(BeZero()) + }) + }) + + Context("when neither name nor path are provided", func() { + It("returns an error", func() { + _, err := DoDefaulting( + "", + nil, + "", + "", + 0, + 0, + ) + Expect(err).To(MatchError("must have at least one of name or path")) + }) + }) +}) + +var simpleBashScript = []string{ + "-c", + ` + i=0 + while true + do + echo "loop $i" >&2 + let 'i += 1' + sleep 0.2 + done + `, +} + +func getSimpleCommand() *exec.Cmd { + return exec.Command("bash", simpleBashScript...) 
+} + +func getServerURL(server *ghttp.Server) url.URL { + url, err := url.Parse(server.URL()) + Expect(err).NotTo(HaveOccurred()) + return *url +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/kubectl.go b/vendor/sigs.k8s.io/testing_frameworks/integration/kubectl.go new file mode 100644 index 000000000..8d2295b09 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/kubectl.go @@ -0,0 +1,47 @@ +package integration + +import ( + "bytes" + "io" + "os/exec" + + "sigs.k8s.io/testing_frameworks/integration/internal" +) + +// KubeCtl is a wrapper around the kubectl binary. +type KubeCtl struct { + // Path where the kubectl binary can be found. + // + // If this is left empty, we will attempt to locate a binary, by checking for + // the TEST_ASSET_KUBECTL environment variable, and the default test assets + // directory. See the "Binaries" section above (in doc.go) for details. + Path string + + // Opts can be used to configure additional flags which will be used each + // time the wrapped binary is called. + // + // For example, you might want to use this to set the URL of the APIServer to + // connect to. + Opts []string +} + +// Run executes the wrapped binary with some preconfigured options and the +// arguments given to this method. It returns Readers for the stdout and +// stderr. +func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) { + if k.Path == "" { + k.Path = internal.BinPathFinder("kubectl") + } + + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + allArgs := append(k.Opts, args...) + + cmd := exec.Command(k.Path, allArgs...) + cmd.Stdout = stdoutBuffer + cmd.Stderr = stderrBuffer + + err = cmd.Run() + + return stdoutBuffer, stderrBuffer, err +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/kubectl_test.go b/vendor/sigs.k8s.io/testing_frameworks/integration/kubectl_test.go new file mode 100644 index 000000000..7dca7a767 --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/kubectl_test.go @@ -0,0 +1,39 @@ +package integration_test + +import ( + "io/ioutil" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "sigs.k8s.io/testing_frameworks/integration" +) + +var _ = Describe("Kubectl", func() { + It("runs kubectl", func() { + k := &KubeCtl{Path: "bash"} + args := []string{"-c", "echo 'something'"} + stdout, stderr, err := k.Run(args...) + Expect(err).NotTo(HaveOccurred()) + Expect(stdout).To(ContainSubstring("something")) + bytes, err := ioutil.ReadAll(stderr) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).To(BeEmpty()) + }) + + Context("when the command returns a non-zero exit code", func() { + It("returns an error", func() { + k := &KubeCtl{Path: "bash"} + args := []string{ + "-c", "echo 'this is StdErr' >&2; echo 'but this is StdOut' >&1; exit 66", + } + + stdout, stderr, err := k.Run(args...) 
+ + Expect(err).To(MatchError(ContainSubstring("exit status 66"))) + + Expect(stdout).To(ContainSubstring("but this is StdOut")) + Expect(stderr).To(ContainSubstring("this is StdErr")) + }) + }) +}) diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/scripts/download-binaries.sh b/vendor/sigs.k8s.io/testing_frameworks/integration/scripts/download-binaries.sh new file mode 100755 index 000000000..f9f142fea --- /dev/null +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/scripts/download-binaries.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -eu + +# Use DEBUG=1 ./scripts/download-binaries.sh to get debug output +quiet="-s" +[[ -z "${DEBUG:-""}" ]] || { + set -x + quiet="" +} + +logEnd() { + local msg='done.' + [ "$1" -eq 0 ] || msg='Error downloading assets' + echo "$msg" +} +trap 'logEnd $?' EXIT + +# Use BASE_URL=https://my/binaries/url ./scripts/download-binaries to download +# from a different bucket +: "${BASE_URL:="https://storage.googleapis.com/k8s-c10s-test-binaries"}" + +test_framework_dir="$(cd "$(dirname "$0")/.." ; pwd)" +os="$(uname -s)" +os_lowercase="$(echo "$os" | tr '[:upper:]' '[:lower:]' )" +arch="$(uname -m)" + +dest_dir="${1:-"${test_framework_dir}/assets/bin"}" +etcd_dest="${dest_dir}/etcd" +kubectl_dest="${dest_dir}/kubectl" +kube_apiserver_dest="${dest_dir}/kube-apiserver" + +echo "About to download a couple of binaries. This might take a while..." + +curl $quiet "${BASE_URL}/etcd-${os}-${arch}" --output "$etcd_dest" +curl $quiet "${BASE_URL}/kube-apiserver-${os}-${arch}" --output "$kube_apiserver_dest" + +kubectl_version="$(curl $quiet https://storage.googleapis.com/kubernetes-release/release/stable.txt)" +kubectl_url="https://storage.googleapis.com/kubernetes-release/release/${kubectl_version}/bin/${os_lowercase}/amd64/kubectl" +curl $quiet "$kubectl_url" --output "$kubectl_dest" + +chmod +x "$etcd_dest" "$kubectl_dest" "$kube_apiserver_dest" + +echo "# destination:" +echo "# ${dest_dir}" +echo "# versions:" +echo -n "# etcd: "; "$etcd_dest" --version | head -n 1 +echo -n "# kube-apiserver: "; "$kube_apiserver_dest" --version +echo -n "# kubectl: "; "$kubectl_dest" version --client --short
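+
+# Illustrative invocation (the bucket URL below is an assumed example; DEBUG
+# and BASE_URL are the knobs documented at the top of this script):
+#   DEBUG=1 BASE_URL=https://example.com/my-binaries ./scripts/download-binaries.sh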