From ca6d343a799d0a1bce8e6fe14c62fef18e4edaf8 Mon Sep 17 00:00:00 2001 From: iguazio-deploy Date: Wed, 19 Feb 2020 12:16:34 +0000 Subject: [PATCH 01/27] Updated TSDB to v0.9.12-5 --- .../vendor/github.com/v3io/v3io-tsdb/Makefile | 10 +- .../vendor/github.com/v3io/v3io-tsdb/go.mod | 5 +- .../vendor/github.com/v3io/v3io-tsdb/go.sum | 11 +- .../v3io/v3io-tsdb/pkg/appender/appender.go | 33 +- .../v3io/v3io-tsdb/pkg/appender/ingest.go | 16 +- .../v3io/v3io-tsdb/pkg/appender/store.go | 9 +- .../v3io/v3io-tsdb/pkg/chunkenc/vartype.go | 24 +- .../v3io/v3io-tsdb/pkg/chunkenc/xor.go | 13 +- .../v3io/v3io-tsdb/pkg/config/config.go | 18 +- .../v3io/v3io-tsdb/pkg/partmgr/partmgr.go | 62 +- .../pkg/pquerier/chunkIterator_test.go | 19 +- .../v3io/v3io-tsdb/pkg/pquerier/collector.go | 5 +- .../client_aggregates_integration_test.go | 29 +- ...oss_series_aggregation_integration_test.go | 27 +- .../dataframe_query_integration_test.go | 120 +- .../downsample_query_integration_test.go | 4 +- .../integration_test_basic_test.go | 18 - .../query_sql_integration_test.go | 6 +- .../raw_query_integration_test.go | 40 +- .../server_aggregates_integration_test.go | 15 +- .../windowed_aggregation_integration_test.go | 15 +- .../pkg/tsdb/delete_integration_test.go | 1141 +++++++++++++++ .../v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go | 94 +- .../v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go | 530 ++++++- .../pkg/tsdb/v3iotsdb_integration_test.go | 365 +---- .../v3io/v3io-tsdb/pkg/tsdbctl/delete.go | 34 +- .../v3io/v3io-tsdb/pkg/utils/misc.go | 23 +- .../test/benchmark/BenchmarkIngest_test.go | 14 +- .../vendor/github.com/nuclio/errors/errors.go | 285 ---- .../vendor/github.com/nuclio/errors/go.mod | 3 - .../vendor/github.com/nuclio/errors/go.sum | 0 .../nuclio/nuclio-sdk-go/.gitignore | 1 - .../nuclio/nuclio-sdk-go/.travis.yml | 6 - .../github.com/nuclio/nuclio-sdk-go/HACK.md | 6 - .../github.com/nuclio/nuclio-sdk-go/LICENSE | 201 --- .../github.com/nuclio/nuclio-sdk-go/Makefile | 55 - .../github.com/nuclio/nuclio-sdk-go/README.md | 3 - .../nuclio/nuclio-sdk-go/context.go | 49 - .../nuclio/nuclio-sdk-go/databinding.go | 21 - .../github.com/nuclio/nuclio-sdk-go/doc.go | 23 - .../github.com/nuclio/nuclio-sdk-go/errgen.go | 23 - .../github.com/nuclio/nuclio-sdk-go/errors.go | 1223 ----------------- .../github.com/nuclio/nuclio-sdk-go/event.go | 275 ---- .../nuclio/nuclio-sdk-go/gen_errors.go | 183 --- .../nuclio/nuclio-sdk-go/response.go | 25 - .../github.com/nuclio/nuclio-sdk-go/types.go | 20 - .../github.com/nuclio/nuclio-test-go/LICENSE | 201 --- .../nuclio/nuclio-test-go/README.md | 75 - .../github.com/nuclio/nuclio-test-go/event.go | 93 -- .../nuclio/nuclio-test-go/nutest.go | 116 -- .../vendor/github.com/nuclio/zap/README.md | 1 - .../vendor/github.com/nuclio/zap/buffer.go | 143 -- .../vendor/github.com/nuclio/zap/logger.go | 458 ------ .../vendor/github.com/nuclio/zap/mux.go | 139 -- .../v3io/v3io-go/pkg/dataplane/container.go | 5 +- .../v3io/v3io-go/pkg/dataplane/context.go | 5 + .../v3io-go/pkg/dataplane/http/container.go | 4 +- .../v3io-go/pkg/dataplane/http/context.go | 315 ++++- .../v3io/v3io-go/pkg/dataplane/types.go | 27 +- .../v3io/v3io-tsdb/vendor/modules.txt | 2 +- .../vendor/github.com/v3io/v3io-tsdb/Makefile | 10 +- .../vendor/github.com/v3io/v3io-tsdb/go.mod | 5 +- .../vendor/github.com/v3io/v3io-tsdb/go.sum | 11 +- .../v3io/v3io-tsdb/pkg/appender/appender.go | 33 +- .../v3io/v3io-tsdb/pkg/appender/ingest.go | 16 +- .../v3io/v3io-tsdb/pkg/appender/store.go | 9 +- .../v3io/v3io-tsdb/pkg/chunkenc/vartype.go | 24 +- 
.../v3io/v3io-tsdb/pkg/chunkenc/xor.go | 13 +- .../v3io/v3io-tsdb/pkg/config/config.go | 18 +- .../v3io/v3io-tsdb/pkg/partmgr/partmgr.go | 62 +- .../pkg/pquerier/chunkIterator_test.go | 19 +- .../v3io/v3io-tsdb/pkg/pquerier/collector.go | 5 +- .../client_aggregates_integration_test.go | 29 +- ...oss_series_aggregation_integration_test.go | 27 +- .../dataframe_query_integration_test.go | 120 +- .../downsample_query_integration_test.go | 4 +- .../integration_test_basic_test.go | 18 - .../query_sql_integration_test.go | 6 +- .../raw_query_integration_test.go | 40 +- .../server_aggregates_integration_test.go | 15 +- .../windowed_aggregation_integration_test.go | 15 +- .../pkg/tsdb/delete_integration_test.go | 1141 +++++++++++++++ .../v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go | 94 +- .../v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go | 530 ++++++- .../pkg/tsdb/v3iotsdb_integration_test.go | 365 +---- .../v3io/v3io-tsdb/pkg/tsdbctl/delete.go | 34 +- .../v3io/v3io-tsdb/pkg/utils/misc.go | 23 +- .../test/benchmark/BenchmarkIngest_test.go | 14 +- .../vendor/github.com/nuclio/errors/errors.go | 285 ---- .../vendor/github.com/nuclio/errors/go.mod | 3 - .../vendor/github.com/nuclio/errors/go.sum | 0 .../nuclio/nuclio-sdk-go/.gitignore | 1 - .../nuclio/nuclio-sdk-go/.travis.yml | 6 - .../github.com/nuclio/nuclio-sdk-go/HACK.md | 6 - .../github.com/nuclio/nuclio-sdk-go/LICENSE | 201 --- .../github.com/nuclio/nuclio-sdk-go/Makefile | 55 - .../github.com/nuclio/nuclio-sdk-go/README.md | 3 - .../nuclio/nuclio-sdk-go/context.go | 49 - .../nuclio/nuclio-sdk-go/databinding.go | 21 - .../github.com/nuclio/nuclio-sdk-go/doc.go | 23 - .../github.com/nuclio/nuclio-sdk-go/errgen.go | 23 - .../github.com/nuclio/nuclio-sdk-go/errors.go | 1223 ----------------- .../github.com/nuclio/nuclio-sdk-go/event.go | 275 ---- .../nuclio/nuclio-sdk-go/gen_errors.go | 183 --- .../nuclio/nuclio-sdk-go/response.go | 25 - .../github.com/nuclio/nuclio-sdk-go/types.go | 20 - .../github.com/nuclio/nuclio-test-go/LICENSE | 201 --- .../nuclio/nuclio-test-go/README.md | 75 - .../github.com/nuclio/nuclio-test-go/event.go | 93 -- .../nuclio/nuclio-test-go/nutest.go | 116 -- .../vendor/github.com/nuclio/zap/README.md | 1 - .../vendor/github.com/nuclio/zap/buffer.go | 143 -- .../vendor/github.com/nuclio/zap/logger.go | 458 ------ .../vendor/github.com/nuclio/zap/mux.go | 139 -- .../v3io/v3io-go/pkg/dataplane/container.go | 5 +- .../v3io/v3io-go/pkg/dataplane/context.go | 5 + .../v3io-go/pkg/dataplane/http/container.go | 4 +- .../v3io-go/pkg/dataplane/http/context.go | 315 ++++- .../v3io/v3io-go/pkg/dataplane/types.go | 27 +- .../v3io/v3io-tsdb/vendor/modules.txt | 2 +- 120 files changed, 4370 insertions(+), 9002 deletions(-) create mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.sum delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md delete mode 100644 
functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go create mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.sum delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go 
delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile index 4a6e480e..73465498 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile @@ -32,15 +32,7 @@ BUILD_OPTS := -ldflags " \ -X $(CONFIG_PKG).branch=$(GIT_BRANCH)" \ -v -o "$(GOPATH)/bin/$(TSDBCTL_BIN_NAME)" -TSDB_BUILD_COMMAND ?= GO111MODULE="on" CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl - -.PHONY: fmt -fmt: - gofmt -l -s -w . 
- -.PHONY: get -get: - GO111MODULE="on" go mod tidy +TSDB_BUILD_COMMAND ?= CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl .PHONY: test test: diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod index 1b88ca5d..0f30f3fd 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod @@ -7,12 +7,11 @@ require ( github.com/cpuguy83/go-md2man v1.0.10 // indirect github.com/ghodss/yaml v1.0.0 github.com/imdario/mergo v0.3.7 - github.com/kr/pretty v0.2.0 // indirect + github.com/kr/pretty v0.1.0 // indirect github.com/nuclio/logger v0.0.1 github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324 github.com/nuclio/nuclio-test-go v0.0.0-20180704132150-0ce6587f8e37 github.com/nuclio/zap v0.0.2 - github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b // indirect github.com/pkg/errors v0.8.1 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a github.com/spf13/cobra v0.0.3 @@ -20,7 +19,7 @@ require ( github.com/stretchr/testify v1.4.0 github.com/tinylib/msgp v1.1.1 // indirect github.com/v3io/frames v0.6.8-v0.9.11 - github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 + github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d github.com/v3io/v3io-go-http v0.0.0-20190415143924-cc2fbcde6663 // indirect github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9 // indirect diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum index 9b50f98e..75aa4c29 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum @@ -34,8 +34,6 @@ github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -63,8 +61,6 @@ github.com/nuclio/nuclio-test-go v0.0.0-20180704132150-0ce6587f8e37/go.mod h1:aO github.com/nuclio/zap v0.0.0-20180228181516-4a2bd2f9ef28/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= github.com/nuclio/zap v0.0.2 h1:rY5PkMOl8CTkqRqIPuxziBiKK6Mq/8oEurfgRnNtqf0= github.com/nuclio/zap v0.0.2/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= -github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b h1:yS0+/i6mwRZCdssUd+MkFJkCn/Evh1PlUKCYe3aCtQw= -github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8 h1:1N/m7VjDY1Pd30Uwv6bLttZVFQm3n8RUK9Ylf2J+4a4= github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8/go.mod h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8 h1:WqLgmr/wj9TO5Sc6oYPQRAJBxuHE0NTeuVeFnT+FZVo= @@ -80,9 +76,9 @@ 
github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9 github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/russross/blackfriday v1.5.2+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2+incompatible h1:/YIL6L1Deczl4O/cQ7ZVdrdKwuB6y7EWpw9LkD8xofE= +github.com/russross/blackfriday v1.5.2+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -108,10 +104,9 @@ github.com/v3io/frames v0.6.8-v0.9.11/go.mod h1:V3j8yjzhNNGXjosCBn7Qf8C8jo25Y+7G github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 h1:myF4tU/HdFWU1UzMdf16cHRbownzsyvL7VKIHqkrSvo= github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871/go.mod h1:QD2Bo64oyTWzeV8RFehXS0hZEDFgOK99/h2a6ErRu6E= github.com/v3io/v3io-go v0.0.0-20191024084247-042df6b5ee40eb60996ab7f4e74ec9aa07d996c4/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= -github.com/v3io/v3io-go v0.0.0-20191120130819-9003ae83f0b673afb88b862d8f46dcc818684450 h1:3JMzABqziU+dBO4NCoIGRhI/NGYPd6d6Zug68nTXQkU= github.com/v3io/v3io-go v0.0.0-20191120130819-9003ae83f0b673afb88b862d8f46dcc818684450/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= -github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 h1:+52DyMCjcWg6uXAlTe0KgbOsiQqUKrtL9tBPSERhyFg= -github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= +github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d h1:OotbIx7+QYju2DlAAVxWz0QFzBicHLc47u9DJGpVUL4= +github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2 h1:NJc63wM25iS+ci5z7LVwjWD4QM0QpTQw/fovKzatss0= github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2/go.mod h1:GXYcR9MxgfbE3BJdkXki5EclvtS8Nxu2RQNLA8hMMog= github.com/v3io/v3io-go-http v0.0.0-20190415143924-cc2fbcde6663 h1:WZcM/GRBAastacksmv5pODbtr8fJ/0/9EsPDpPfXkRk= diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go index 614e095b..8e3fa0d3 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go @@ -122,9 +122,7 @@ type MetricsCache struct { updatesComplete chan int newUpdates chan int - lastMetric uint64 - - // TODO: consider switching to synch.Map (https://golang.org/pkg/sync/#Map) + lastMetric uint64 cacheMetricMap map[cacheKey]*MetricState // TODO: maybe use hash as key & combine w ref cacheRefMap map[uint64]*MetricState // TODO: maybe turn to list + free list, periodically delete old matrics @@ -220,15 +218,6 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v 
interface{}) (uint6 return 0, err } - isValueVariantType := false - // If the value is not of Float type assume it's variant type. - switch v.(type) { - case int, int64, float64, float32: - isValueVariantType = false - default: - isValueVariantType = true - } - name, key, hash := lset.GetKey() err = utils.IsValidMetricName(name) if err != nil { @@ -249,9 +238,11 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 aggrMetrics = append(aggrMetrics, aggrMetric) } } - metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, - aggrs: aggrMetrics, isVariant: isValueVariantType} - + metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, aggrs: aggrMetrics} + // if the (first) value is not float, use variant encoding, TODO: test w schema + if _, ok := v.(float64); !ok { + metric.isVariant = true + } metric.store = NewChunkStore(mc.logger, lset.LabelNames(), false) mc.addMetric(hash, name, metric) } else { @@ -261,18 +252,6 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 err = metric.error() metric.setError(nil) - if isValueVariantType != metric.isVariant { - newValueType := "numeric" - if isValueVariantType { - newValueType = "string" - } - existingValueType := "numeric" - if metric.isVariant { - existingValueType = "string" - } - return 0, errors.Errorf("Cannot append %v type metric to %v type metric.", newValueType, existingValueType) - } - mc.appendTV(metric, t, v) for _, aggrMetric := range aggrMetrics { mc.appendTV(aggrMetric, t, v) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go index 2d82b0af..11faf119 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go @@ -29,7 +29,6 @@ import ( "github.com/pkg/errors" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-go/pkg/errors" - "github.com/v3io/v3io-tsdb/pkg/utils" ) // Start event loops for handling metric updates (appends and Get/Update DB responses) @@ -271,7 +270,7 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, if resp.Error != nil && metric.getState() != storeStateGet { req := reqInput.(*v3io.UpdateItemInput) - mc.logger.WarnWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, + mc.logger.ErrorWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, "in-flight", mc.updatesInFlight, "mqueue", mc.metricQueue.Length(), "numsamples", metric.store.samplesQueueLength(), "path", req.Path, "update expression", req.Expression) } else { @@ -305,17 +304,8 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, // Metrics with too many update errors go into Error state metric.retryCount++ if e, hasStatusCode := resp.Error.(v3ioerrors.ErrorWithStatusCode); hasStatusCode && e.StatusCode() != http.StatusServiceUnavailable { - // If condition was evaluated as false log this and report this error upstream. - if utils.IsFalseConditionError(resp.Error) { - req := reqInput.(*v3io.UpdateItemInput) - // This might happen on attempt to add metric value of wrong type, i.e. float <-> string - errMsg := fmt.Sprintf("trying to ingest values of incompatible data type. 
Metric %q has not been updated.", req.Path) - mc.logger.ErrorWith(errMsg) - setError(mc, metric, errors.Wrap(resp.Error, errMsg)) - } else { - mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) - setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) - } + mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) + setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) clear() return false } else if metric.retryCount == maxRetriesOnWrite { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go index 68cfa292..b9387adf 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go @@ -418,20 +418,17 @@ func (cs *chunkStore) writeChunks(mc *MetricsCache, metric *MetricState) (hasPen var encodingExpr string if !cs.isAggr() { - encodingExpr = fmt.Sprintf("%s='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) + encodingExpr = fmt.Sprintf("%v='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) } - lsetExpr := fmt.Sprintf("%s='%s'; ", config.LabelSetAttrName, metric.key) + lsetExpr := fmt.Sprintf("%v='%s'; ", config.LabelSetAttrName, metric.key) expr = lblexpr + encodingExpr + lsetExpr + expr } // Call the V3IO async UpdateItem method - conditionExpr := fmt.Sprintf("NOT exists(%s) OR (exists(%s) AND %s == '%d')", - config.EncodingAttrName, config.EncodingAttrName, - config.EncodingAttrName, activeChunk.appender.Encoding()) expr += fmt.Sprintf("%v=%d;", config.MaxTimeAttrName, cs.maxTime) // TODO: use max() expr path := partition.GetMetricPath(metric.name, metric.hash, cs.labelNames, cs.isAggr()) request, err := mc.container.UpdateItem( - &v3io.UpdateItemInput{Path: path, Expression: &expr, Condition: conditionExpr}, metric, mc.responseChan) + &v3io.UpdateItemInput{Path: path, Expression: &expr}, metric, mc.responseChan) if err != nil { mc.logger.ErrorWith("UpdateItem failed", "err", err) hasPendingUpdates = false diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go index 7d7845e9..ae13c04f 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go @@ -108,18 +108,36 @@ func (a *varAppender) Chunk() Chunk { } func (a *varAppender) Append(t int64, v interface{}) { + if v == nil { a.appendNoValue(t, varTypeNil, varValueNone) return } - switch val := v.(type) { + switch vType := v.(type) { + case float64: + val := v.(float64) + if val == 0 { + a.appendNoValue(t, varTypeFloat64, varValueZero) + return + + } + + if math.IsNaN(val) { + a.appendNoValue(t, varTypeFloat64, varValueNone) + return + } + + a.appendWithUint(t, varTypeFloat64, math.Float64bits(val)) + case string: - a.appendWithValue(t, varTypeString, []byte(val)) + val := []byte(v.(string)) + a.appendWithValue(t, varTypeString, val) default: - a.logger.Error("unsupported type %T of value %v\n", v, v) + a.logger.Error("unsupported type %v of value %v\n", vType, v) } + } func (a *varAppender) appendNoValue(t int64, varType, varVal byte) { diff --git 
a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go index 06e8df5c..44029b5f 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go @@ -173,18 +173,7 @@ func (a *xorAppender) Chunk() Chunk { func (a *xorAppender) Append(t int64, vvar interface{}) { var tDelta uint64 num := *a.samples - - var v float64 - switch typedValue := vvar.(type) { - case int: - v = float64(typedValue) - case float64: - v = typedValue - default: - a.logger.Warn("Discarding sample {time: %d, value: %v}, as it's value is of incompatible data type. "+ - "Reason: expected 'float' actual '%T'.", t, vvar, vvar) - return - } + v := vvar.(float64) // Do not append if sample is too old. if t < a.t { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go index d2c51e19..48d2bcbc 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go @@ -62,14 +62,16 @@ const ( DefaultUseServerAggregateCoefficient = 3 // KV attribute names - MaxTimeAttrName = "_maxtime" - LabelSetAttrName = "_lset" - EncodingAttrName = "_enc" - OutOfOrderAttrName = "_ooo" - MetricNameAttrName = "_name" - ObjectNameAttrName = "__name" - ChunkAttrPrefix = "_v" - AggregateAttrPrefix = "_v_" + MaxTimeAttrName = "_maxtime" + LabelSetAttrName = "_lset" + EncodingAttrName = "_enc" + OutOfOrderAttrName = "_ooo" + MetricNameAttrName = "_name" + ObjectNameAttrName = "__name" + ChunkAttrPrefix = "_v" + AggregateAttrPrefix = "_v_" + MtimeSecsAttributeName = "__mtime_secs" + MtimeNSecsAttributeName = "__mtime_nsecs" PrometheusMetricNameAttribute = "__name__" diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go index 8a101bcb..f82ed68a 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go @@ -198,7 +198,7 @@ func (p *PartitionManager) updateSchema() error { } input := &v3io.PutItemInput{Path: schemaFilePath, Attributes: attributes} - err := p.container.PutItemSync(input) + _, err := p.container.PutItemSync(input) if err != nil { outerError = errors.Wrap(err, "failed to update partitions table.") @@ -238,7 +238,7 @@ func (p *PartitionManager) DeletePartitionsFromSchema(partitionsToDelete []*DBPa deletePartitionExpression.WriteString(");") } expression := deletePartitionExpression.String() - err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) + _, err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) if err != nil { return err } @@ -592,6 +592,33 @@ func (p *DBPartition) Time2Bucket(t int64) int { return int((t - p.startTime) / p.rollupTime) } +// Return the start time of an aggregation bucket by id +func (p *DBPartition) GetAggregationBucketStartTime(id int) int64 { + return p.startTime + int64(id)*p.rollupTime +} + +// Return the end time of an aggregation bucket by id +func (p *DBPartition) GetAggregationBucketEndTime(id int) int64 { + return p.startTime + int64(id+1)*p.rollupTime - 1 +} + +func (p *DBPartition) Times2BucketRange(start, end int64) 
[]int { + var buckets []int + + if start > p.GetEndTime() || end < p.startTime { + return buckets + } + + startingAggrBucket := p.Time2Bucket(start) + endAggrBucket := p.Time2Bucket(end) + + for bucketID := startingAggrBucket; bucketID <= endAggrBucket; bucketID++ { + buckets = append(buckets, bucketID) + } + + return buckets +} + // Return the nearest chunk start time for the specified time func (p *DBPartition) GetChunkMint(t int64) int64 { if t > p.GetEndTime() { @@ -622,6 +649,37 @@ func (p *DBPartition) TimeToChunkId(tmilli int64) (int, error) { } } +// Check if a chunk (by attribute name) is in the given time range. +func (p *DBPartition) IsChunkInRangeByAttr(attr string, mint, maxt int64) bool { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return false + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + chunkEndTime := chunkStartTime + p.chunkInterval - 1 + + return mint <= chunkStartTime && maxt >= chunkEndTime +} + +// Get a chunk's start time by it's attribute name +func (p *DBPartition) GetChunkStartTimeByAttr(attr string) (int64, error) { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return 0, err + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + + return chunkStartTime, nil +} + // Check whether the specified time is within the range of this partition func (p *DBPartition) InRange(t int64) bool { if p.manager.cyclic { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go index 28030d2a..ce214746 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go @@ -85,26 +85,11 @@ func (suite *testRawChunkIterSuite) TestRawChunkIteratorWithZeroValue() { prevT, prevV := iter.PeakBack() suite.Require().Equal(ingestData[index].Time, t, "current time does not match") - - switch val := ingestData[index].Value.(type) { - case float64: - suite.Require().Equal(val, v, "current value does not match") - case int: - suite.Require().Equal(float64(val), v, "current value does not match") - default: - suite.Require().Equal(val, v, "current value does not match") - } + suite.Require().Equal(ingestData[index].Value, v, "current value does not match") if index > 0 { suite.Require().Equal(ingestData[index-1].Time, prevT, "current time does not match") - switch val := ingestData[index-1].Value.(type) { - case float64: - suite.Require().Equal(val, prevV, "current value does not match") - case int: - suite.Require().Equal(float64(val), prevV, "current value does not match") - default: - suite.Require().Equal(val, prevV, "current value does not match") - } + suite.Require().Equal(ingestData[index-1].Value, prevV, "current value does not match") } index++ } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go index 232fc610..35e95d10 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go @@ -249,10 +249,7 @@ func downsampleRawData(ctx *selectQueryContext, res *qryResults, func aggregateClientAggregatesCrossSeries(ctx 
*selectQueryContext, res *qryResults, previousPartitionLastTime int64, previousPartitionLastValue float64) (int64, float64, error) { ctx.logger.Debug("using Client Aggregates Collector for metric %v", res.name) - it, ok := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) - if !ok { - return previousPartitionLastTime, previousPartitionLastValue, nil - } + it := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) var previousPartitionEndBucket int if previousPartitionLastTime != 0 { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go index 45b8724e..efb5fd5d 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go @@ -80,7 +80,7 @@ func (suite *testClientAggregatesSuite) TestQueryAggregateWithNameWildcard() { suite.T().Fatal(err) } - suite.compareMultipleMetrics(data, expected, metricName, aggr) + assert.Equal(suite.T(), expected[metricName][aggr], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expectedData)*len(expected), seriesCount, "series count didn't match expected") @@ -139,7 +139,7 @@ func (suite *testClientAggregatesSuite) TestQueryAggregateWithFilterOnMetricName suite.T().Fatal(err) } - suite.compareMultipleMetrics(data, expected, metricName, aggr) + assert.Equal(suite.T(), expected[metricName][aggr], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -195,7 +195,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesSinglePartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -219,8 +219,8 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { tsdbtest.TestOption{ Key: tsdbtest.OptTimeSeries, Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Labels: labels1, Name: "cpu", + Labels: labels1, Data: ingestedData}, }}) tsdbtest.InsertData(suite.T(), testParams) @@ -255,7 +255,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -315,7 +315,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionNonCon suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -371,7 +371,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionOneSte suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -473,7 +473,7 @@ func (suite 
*testClientAggregatesSuite) TestSelectAggregatesByRequestedColumns() suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -530,7 +530,7 @@ func (suite *testClientAggregatesSuite) TestSelectAggregatesAndRawByRequestedCol suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -588,7 +588,7 @@ func (suite *testClientAggregatesSuite) TestQueryAllData() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -618,8 +618,7 @@ func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { }}) tsdbtest.InsertData(suite.T(), testParams) - expected := map[string][]tsdbtest.DataPoint{ - "max": {{Time: suite.basicQueryTime, Value: 40}}, + expected := map[string][]tsdbtest.DataPoint{"max": {{Time: suite.basicQueryTime, Value: 40}}, "min": {{Time: suite.basicQueryTime, Value: 10}}, "sum": {{Time: suite.basicQueryTime, Value: 100}}, "count": {{Time: suite.basicQueryTime, Value: 4}}, @@ -647,9 +646,7 @@ func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { suite.T().Fatal(err) } - for i, dataPoint := range expected[agg] { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 4, seriesCount, "series count didn't match expected") @@ -701,7 +698,7 @@ func (suite *testClientAggregatesSuite) TestUsePreciseAggregationsConfig() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(3, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go index 624ec921..6488677c 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go @@ -82,7 +82,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesTimesFalls if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -152,7 +153,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregates() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -218,7 +219,7 
@@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -284,7 +285,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterp suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -358,7 +359,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -442,7 +444,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -509,7 +512,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterp if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -566,7 +570,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSinglePart suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -625,7 +629,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestOnlyVirtualCrossSeriesAggregate if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -694,7 +699,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSameLabelM metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) suite.NoError(err) - suite.compareSingleMetricWithAggregator(data, expected, fmt.Sprintf("%v-%v", agg, metricName)) + suite.Require().Equal(expected[fmt.Sprintf("%v-%v", agg, metricName)], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -763,7 +768,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesDifferentL data, err := tsdbtest.IteratorToSlice(iter) suite.NoError(err) - suite.compareSingleMetric(data, expected) + suite.Require().Equal(expected, data, "queried data does not match expected") } suite.Require().Equal(2, seriesCount, "series count didn't match expected") diff --git 
a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go index f52546de..ace74820 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go @@ -3,7 +3,6 @@ package pqueriertest import ( - "errors" "fmt" "math" "strings" @@ -87,17 +86,7 @@ func (suite *testSelectDataframeSuite) TestAggregatesWithZeroStepSelectDataframe currentColAggregate := strings.Split(col.Name(), "(")[0] f, err := col.FloatAt(0) assert.NoError(suite.T(), err) - - var expectedFloat float64 - switch val := expected[currentColAggregate].Value.(type) { - case int: - expectedFloat = float64(val) - case float64: - expectedFloat = val - default: - suite.Failf("invalid data type", "expected int or float, actual type is %t", val) - } - suite.Require().Equal(expectedFloat, f) + suite.Require().Equal(expected[currentColAggregate].Value, f) } } @@ -219,23 +208,11 @@ func (suite *testSelectDataframeSuite) Test2Series1EmptySelectDataframe() { assert.Equal(suite.T(), len(ingestedData), col.Len()) for i := 0; i < col.Len(); i++ { currentExpected := expected[col.Name()][i].Value - switch val := currentExpected.(type) { - case float64: - fv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(fv)) { - assert.Equal(suite.T(), currentExpected, fv) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - sv, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, sv) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) } } } @@ -391,24 +368,11 @@ func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetricsWithMult currentExpectedData := expectedData[fmt.Sprintf("%v-%v", col.Name(), frame.Labels()["os"])] assert.Equal(suite.T(), len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) - switch val := currentExpected.(type) { - case float64: - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(f)) { - assert.Equal(suite.T(), currentExpected, f) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - s, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, s) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) } } } @@ -648,24 +612,11 @@ func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetrics() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) - switch val := currentExpected.(type) { - case float64: - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(f)) { - 
assert.Equal(suite.T(), currentExpected, f) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - s, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, s) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + suite.Require().Equal(currentExpected, f) } } } @@ -748,23 +699,11 @@ func (suite *testSelectDataframeSuite) TestColumnOrder() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value - switch val := currentExpected.(type) { - case float64: - fv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(fv)) { - assert.Equal(suite.T(), currentExpected, fv) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - sv, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, sv) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + suite.Require().Equal(currentExpected, f) } } } @@ -833,24 +772,11 @@ func (suite *testSelectDataframeSuite) TestQueryNonExistingMetric() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) - switch val := currentExpected.(type) { - case float64: - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(f)) { - assert.Equal(suite.T(), currentExpected, f) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - s, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, s) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + suite.Require().Equal(currentExpected, f) } } } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go index e2064425..72d2f7ac 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go @@ -120,7 +120,7 @@ func (suite *testDownsampleSuite) TestRawDataSinglePartitionWithDownSample() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -176,7 +176,7 @@ func (suite *testDownsampleSuite) TestRawDataDownSampleMultiPartitions() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git 
a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go index 8346aea1..2d58da27 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go @@ -66,21 +66,3 @@ func (suite *basicQueryTestSuite) TearDownTest() { tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) } } - -func (suite *basicQueryTestSuite) compareSingleMetric(data []tsdbtest.DataPoint, expected []tsdbtest.DataPoint) { - for i, dataPoint := range data { - suite.Require().True(dataPoint.Equals(expected[i]), "queried data does not match expected") - } -} - -func (suite *basicQueryTestSuite) compareSingleMetricWithAggregator(data []tsdbtest.DataPoint, expected map[string][]tsdbtest.DataPoint, agg string) { - for i, dataPoint := range data { - suite.Require().True(dataPoint.Equals(expected[agg][i]), "queried data does not match expected") - } -} - -func (suite *basicQueryTestSuite) compareMultipleMetrics(data []tsdbtest.DataPoint, expected map[string]map[string][]tsdbtest.DataPoint, metricName string, aggr string) { - for i, dataPoint := range data { - suite.Require().True(dataPoint.Equals(expected[metricName][aggr][i]), "queried data does not match expected") - } -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go index cb7e646f..4b63277b 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go @@ -88,7 +88,8 @@ func (suite *testSQLSyntaxQuerySuite) TestGroupByOneLabelSinglePartition() { agg := set.At().Labels().Get(aggregate.AggregateLabel) groupByValue := set.At().Labels().Get("os") suite.Require().NoError(err) - suite.compareMultipleMetrics(data, expected, groupByValue, agg) + + suite.Require().Equal(expected[groupByValue][agg], data, "queried data does not match expected") } suite.Require().Equal(4, seriesCount, "series count didn't match expected") @@ -171,7 +172,8 @@ func (suite *testSQLSyntaxQuerySuite) TestGroupByMultipleLabelsSinglePartition() labelsStr := strings.Join(groupByValue, "-") suite.Require().NoError(err) - suite.compareMultipleMetrics(data, expected, labelsStr, agg) + + suite.Require().Equal(expected[labelsStr][agg], data, "queried data does not match expected") } suite.Require().Equal(6, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go index 3a61864c..d7c94207 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go @@ -3,7 +3,6 @@ package pqueriertest import ( - "errors" "fmt" "math" "testing" @@ -74,7 +73,7 @@ func (suite *testRawQuerySuite) TestRawDataSinglePartition() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) 
+ assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -130,7 +129,7 @@ func (suite *testRawQuerySuite) TestRawDataMultiplePartitions() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -187,7 +186,7 @@ func (suite *testRawQuerySuite) TestFilterOnLabel() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -282,9 +281,7 @@ func (suite *testRawQuerySuite) TestSelectRawDataByRequestedColumns() { suite.T().Fatal(err) } - for i, dataPoint := range expected { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -349,9 +346,7 @@ func (suite *testRawQuerySuite) TestRawDataMultipleMetrics() { suite.T().Fatal(err) } - for i, dataPoint := range expectedData[name] { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expectedData[name], data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -497,9 +492,7 @@ func (suite *testRawQuerySuite) TestQueryMultipleMetricsWithMultipleLabelSets() suite.T().Fatal(err) } - for i, dataPoint := range expectedData[fmt.Sprintf("%v-%v", name, os)] { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expectedData[fmt.Sprintf("%v-%v", name, os)], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -548,7 +541,7 @@ func (suite *testRawQuerySuite) TestDifferentLabelSetsInDifferentPartitions() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expected) + suite.Require().Equal(expected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -598,9 +591,7 @@ func (suite *testRawQuerySuite) TestDifferentMetricsInDifferentPartitions() { suite.T().Fatal(err) } - for i, dataPoint := range expected { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + suite.Require().Equal(expected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -772,20 +763,7 @@ func (suite *testRawQuerySuite) TestLoadPartitionsFromAttributes() { suite.T().Fatal(err) } - for i := 0; i < len(expectedData); i++ { - assert.Equal(suite.T(), expectedData[i].Time, data[i].Time) - currentExpected := expectedData[i].Value - switch val := currentExpected.(type) { - case float64: - assert.Equal(suite.T(), val, data[i].Value) - case int: - assert.Equal(suite.T(), float64(val), data[i].Value) - case string: - assert.Equal(suite.T(), val, data[i].Value) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) - } - } + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 2, 
seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go index 4c579fff..811a3c1d 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go @@ -75,7 +75,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -135,7 +135,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartitionNegative suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -200,7 +200,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -263,7 +263,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartitionNonConcre suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -319,7 +319,7 @@ func (suite *testServerAggregatesSuite) TestSelectServerAggregatesAndRawByReques suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -375,8 +375,9 @@ func (suite *testServerAggregatesSuite) TestAggregatesWithDisabledClientAggregat if err != nil { suite.T().Fatal(err) } - - suite.compareSingleMetricWithAggregator(data, expected, agg) + currentExpected, ok := expected[agg] + suite.Require().True(ok, "got unexpected aggregate result") + assert.Equal(suite.T(), currentExpected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go index be0a304d..a8943199 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go @@ -80,7 +80,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowBigg suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) +
assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -144,7 +144,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowSmal suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -208,7 +208,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowEqua suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -273,7 +273,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowExce suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -337,7 +337,7 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowBigg suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -401,7 +401,7 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqua suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -471,7 +471,8 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqua if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go new file mode 100644 index 00000000..dfe24a51 --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go @@ -0,0 +1,1141 @@ +// +build integration + +package tsdb_test + +import ( + "fmt" + "math" + "path" + "strconv" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + v3io "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func timeStringToMillis(timeStr string) int64 { + ta, _ := time.Parse(time.RFC3339, timeStr) + return ta.Unix() * 1000 +} +func TestDeleteTable(t *testing.T) { + ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") + t1 := ta.Unix() * 1000 + tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") + t2 := tb.Unix() * 1000 + tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") + t3 := tc.Unix() * 1000 + td, _ := time.Parse(time.RFC3339, "now + 1w") + futurePoint := td.Unix() * 1000 + + defaultTimeMillis := timeStringToMillis("2019-07-21T00:00:00Z") + generalData := []tsdbtest.DataPoint{ + // partition 1 + // chunk a + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 2 + // chunk a + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 3 + // chunk a + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}} + partitions1StartTime := timeStringToMillis("2019-07-21T00:00:00Z") + partitions2StartTime := timeStringToMillis("2019-07-23T00:00:00Z") + partitions3StartTime := timeStringToMillis("2019-07-25T00:00:00Z") + + testCases := []struct { + desc string + deleteParams DeleteParams + data tsdbtest.TimeSeries + expectedData map[string][]tsdbtest.DataPoint + expectedPartitions []int64 + ignoreReason string + }{ + {desc: "Should delete all table by time", + deleteParams: DeleteParams{ + From: 0, + To: 9999999999999, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete all table by deleteAll", + deleteParams: DeleteParams{ + From: 0, + To: t1, + DeleteAll: true, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete whole partitions", + data: 
tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Filter: "os == 'win'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-win": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 
5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + + { + desc: "Should delete partial chunk in the start", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 4*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: 
defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the middle", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 3*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 7*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the end", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + 
"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 
'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should partially delete the last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis + 6*tsdbtest.MinuteInMillis, + To: partitions3StartTime + 1*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete the whole last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, 
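+ // Note: generalData (defined earlier in this test file) appears to hold five samples per day on
+ // days 0, 2, and 4, split across two one-hour chunks per day and spread over the three partitions
+ // listed in expectedPartitions, as reflected by the full expected result sets above.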
+ deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis, + To: partitions3StartTime + 2*tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete all samples in a chunk when the delete time range is not bigger than the chunk", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 1*tsdbtest.HoursInMillis + 2*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 2*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testDeleteTSDBCase(t, + tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptDropTableOnTearDown, + Value: !test.deleteParams.DeleteAll}, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: 
test.data}, + ), test.deleteParams, test.expectedData, test.expectedPartitions) + }) + } +} + +func getCurrentPartitions(test *testing.T, container v3io.Container, path string) []int64 { + input := &v3io.GetItemInput{Path: path + "/.schema", + AttributeNames: []string{"*"}} + res, err := container.GetItemSync(input) + if err != nil { + test.Fatal(errors.Wrap(err, "failed to get schema")) + } + output := res.Output.(*v3io.GetItemOutput) + var partitions []int64 + for part := range output.Item { + partitionsStartTime, _ := strconv.ParseInt(part[1:], 10, 64) // parse attribute and discard attribute prefix + partitions = append(partitions, partitionsStartTime) + } + return partitions +} + +func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteParams DeleteParams, + expectedData map[string][]tsdbtest.DataPoint, expectedPartitions []int64) { + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + container, err := utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HttpTimeout) + if err != nil { + test.Fatalf("failed to create new container. reason: %s", err) + } + + if err := adapter.DeleteDB(deleteParams); err != nil { + test.Fatalf("Failed to delete DB. reason: %s", err) + } + + if !deleteParams.DeleteAll { + actualPartitions := getCurrentPartitions(test, container, testParams.V3ioConfig().TablePath) + assert.ElementsMatch(test, expectedPartitions, actualPartitions, "remaining partitions are not as expected") + + qry, err := adapter.QuerierV2() + if err != nil { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + + params := &pquerier.SelectParams{ + From: 0, + To: math.MaxInt64, + Filter: "1==1", + } + set, err := qry.Select(params) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + for set.Next() { + series := set.At() + labels := series.Labels() + osLabel := labels.Get("os") + metricName := labels.Get(config.PrometheusMetricNameAttribute) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + + actual, err := iteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + expectedDataKey := metricName + if osLabel != "" { + expectedDataKey = fmt.Sprintf("%v-%v", expectedDataKey, osLabel) + } + + assert.ElementsMatch(test, expectedData[expectedDataKey], actual, + "result data for '%v' didn't match, expected: %v\n actual: %v\n", expectedDataKey, expectedData[expectedDataKey], actual) + + } + if set.Err() != nil { + test.Fatalf("Failed to query metric. 
reason: %v", set.Err()) + } + } else { + container, tablePath := adapter.GetContainer() + tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) + + // Validate: schema does not exist + _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) + } + } + + // Validate: table does not exist + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) + } + } + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go index 2ed1b042..4cfc9a6a 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go @@ -13,7 +13,6 @@ import ( "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" - "github.com/v3io/v3io-tsdb/pkg/pquerier" . "github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" "github.com/v3io/v3io-tsdb/pkg/utils" @@ -25,59 +24,12 @@ const DaysInMillis = 24 * HoursInMillis type DataPoint struct { Time int64 - Value interface{} -} - -func (dp DataPoint) Equals(other DataPoint) bool { - if &dp.Time != &other.Time { - return true - } - if dp.Time != other.Time { - return false - } - - switch dpVal := dp.Value.(type) { - case float64: - switch oVal := other.Value.(type) { - case float64: - return dpVal == oVal - case int: - return dpVal == float64(oVal) - default: - return false - } - case int: - switch oVal := other.Value.(type) { - case float64: - return float64(dpVal) == oVal - case int: - return dpVal == oVal - default: - return false - } - case string: - switch oVal := other.Value.(type) { - case string: - return oVal == dpVal - case float64: - soVal := fmt.Sprintf("%f", oVal) - return dpVal == soVal - case int: - soVal := fmt.Sprintf("%d", oVal) - return dpVal == soVal - default: - return false - } - default: - return false - } + Value float64 } - type Metric struct { - Name string - Labels utils.Labels - Data []DataPoint - ExpectedCount *int + Name string + Labels utils.Labels + Data []DataPoint } type TimeSeries []Metric @@ -148,8 +100,7 @@ func DeleteTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { t.Fatalf("Failed to create an adapter. Reason: %s", err) } - now := time.Now().Unix() * 1000 // Current time (now) in milliseconds - if err := adapter.DeleteDB(true, true, 0, now); err != nil { + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { t.Fatalf("Failed to delete a TSDB instance (table) on teardown. 
Reason: %s", err) } } @@ -175,13 +126,7 @@ func tearDown(t testing.TB, v3ioConfig *config.V3ioConfig, testParams TestParams func SetUp(t testing.TB, testParams TestParams) func() { v3ioConfig := testParams.V3ioConfig() - - if overrideTableName, ok := testParams["override_test_name"]; ok { - v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%v", overrideTableName)) - } else { - v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) - } - + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) CreateTestTSDB(t, v3ioConfig) // Measure performance @@ -271,17 +216,12 @@ func ValidateCountOfSamples(t testing.TB, adapter *V3ioAdapter, metricName strin stepSize = queryAggStep } - qry, err := adapter.QuerierV2() + qry, err := adapter.Querier(nil, startTimeMs-stepSize, endTimeMs) if err != nil { t.Fatal(err, "Failed to create a Querier instance.") } - selectParams := &pquerier.SelectParams{From: startTimeMs - stepSize, - To: endTimeMs, - Functions: "count", - Step: stepSize, - Filter: fmt.Sprintf("starts(__name__, '%v')", metricName)} - set, err := qry.Select(selectParams) + set, err := qry.Select("", "count", stepSize, fmt.Sprintf("starts(__name__, '%v')", metricName)) var actualCount int for set.Next() { @@ -322,7 +262,7 @@ func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, star for set.Next() { // Start over for each label set - var lastDataPoint *DataPoint = nil + var lastDataPoint = &DataPoint{Time: -1, Value: -1.0} if set.Err() != nil { t.Fatal(set.Err(), "Failed to get the next element from a result set.") @@ -337,16 +277,12 @@ func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, star currentTime, currentValue := iter.At() currentDataPoint := &DataPoint{Time: currentTime, Value: currentValue} - if lastDataPoint != nil { - switch dataType := lastDataPoint.Value.(type) { - case string, float64, int, int64: - // Note: We cast float to integer to eliminate the risk of a precision error - if !isValid(lastDataPoint, currentDataPoint) { - t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", - metricName, lastDataPoint, currentDataPoint) - } - default: - t.Fatalf("Got value of unsupported data type: %T", dataType) + if lastDataPoint.Value >= 0 { + // Note: We cast float to integer to eliminate the risk of a + // precision error + if !isValid(lastDataPoint, currentDataPoint) { + t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", + metricName, lastDataPoint, currentDataPoint) } } lastDataPoint = currentDataPoint diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go index 760468a0..2a9b79d7 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go @@ -22,18 +22,24 @@ package tsdb import ( "context" + "encoding/base64" "encoding/json" "fmt" "math" pathUtil "path" "path/filepath" + "strconv" + "strings" + "sync" "time" "github.com/nuclio/logger" "github.com/pkg/errors" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-go/pkg/dataplane/http" + "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/appender" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" "github.com/v3io/v3io-tsdb/pkg/partmgr" 
"github.com/v3io/v3io-tsdb/pkg/pquerier" @@ -42,7 +48,14 @@ import ( "github.com/v3io/v3io-tsdb/pkg/utils" ) -const defaultHttpTimeout = 30 * time.Second +const ( + defaultHttpTimeout = 30 * time.Second + + errorCodeString = "ErrorCode" + falseConditionOuterErrorCode = "184549378" // todo: change codes + falseConditionInnerErrorCode = "385876025" + maxExpressionsInUpdateItem = 1500 // max is 2000, we're taking a buffer since it doesn't work with 2000 +) type V3ioAdapter struct { startTimeMargin int64 @@ -54,6 +67,15 @@ type V3ioAdapter struct { partitionMngr *partmgr.PartitionManager } +type DeleteParams struct { + Metrics []string + Filter string + From, To int64 + DeleteAll bool + + IgnoreErrors bool +} + func CreateTSDB(cfg *config.V3ioConfig, schema *config.Schema) error { lgr, _ := utils.NewLogger(cfg.LogLevel) @@ -240,59 +262,55 @@ func (a *V3ioAdapter) QuerierV2() (*pquerier.V3ioQuerier, error) { return pquerier.NewV3ioQuerier(a.container, a.logger, a.cfg, a.partitionMngr), nil } -func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64, toTime int64) error { - if deleteAll { +// Delete by time range can optionally specify metrics and filter by labels +func (a *V3ioAdapter) DeleteDB(deleteParams DeleteParams) error { + if deleteParams.DeleteAll { // Ignore time boundaries - fromTime = 0 - toTime = math.MaxInt64 - } - - partitions := a.partitionMngr.PartsForRange(fromTime, toTime, false) - for _, part := range partitions { - a.logger.Info("Deleting partition '%s'.", part.GetTablePath()) - err := utils.DeleteTable(a.logger, a.container, part.GetTablePath(), "", a.cfg.QryWorkers) - if err != nil && !ignoreErrors { - return errors.Wrapf(err, "Failed to delete partition '%s'.", part.GetTablePath()) - } - // Delete the Directory object - err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: part.GetTablePath()}) - if err != nil && !ignoreErrors { - return errors.Wrapf(err, "Failed to delete partition object '%s'.", part.GetTablePath()) + deleteParams.From = 0 + deleteParams.To = math.MaxInt64 + } else { + if deleteParams.To == 0 { + deleteParams.To = time.Now().Unix() * 1000 } } - err := a.partitionMngr.DeletePartitionsFromSchema(partitions) + + // Delete Data + err := a.DeletePartitionsData(&deleteParams) if err != nil { return err } + // If no data is left, delete Names folder if len(a.partitionMngr.GetPartitionsPaths()) == 0 { path := filepath.Join(a.cfg.TablePath, config.NamesDirectory) + "/" // Need a trailing slash a.logger.Info("Delete metric names at path '%s'.", path) err := utils.DeleteTable(a.logger, a.container, path, "", a.cfg.QryWorkers) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { return errors.Wrap(err, "Failed to delete the metric-names table.") } // Delete the Directory object err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) } } } - if deleteAll { + + // If need to 'deleteAll', delete schema + TSDB table folder + if deleteParams.DeleteAll { // Delete Schema file schemaPath := pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName) a.logger.Info("Delete the TSDB configuration at '%s'.", schemaPath) err := a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: schemaPath}) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { return errors.New("The 
configuration at '" + schemaPath + "' cannot be deleted or doesn't exist.") } // Delete the Directory object path := a.cfg.TablePath + "/" err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) } @@ -302,6 +320,457 @@ func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64 return nil } +func (a *V3ioAdapter) DeletePartitionsData(deleteParams *DeleteParams) error { + partitions := a.partitionMngr.PartsForRange(deleteParams.From, deleteParams.To, true) + var entirelyDeletedPartitions []*partmgr.DBPartition + + deleteWholePartition := deleteParams.DeleteAll || (deleteParams.Filter == "" && len(deleteParams.Metrics) == 0) + + fileToDeleteChan := make(chan v3io.Item, 1024) + getItemsTerminationChan := make(chan error, len(partitions)) + deleteTerminationChan := make(chan error, a.cfg.Workers) + numOfGetItemsRoutines := len(partitions) + if len(deleteParams.Metrics) > 0 { + numOfGetItemsRoutines = numOfGetItemsRoutines * len(deleteParams.Metrics) + } + goRoutinesNum := numOfGetItemsRoutines + a.cfg.Workers + onErrorTerminationChannel := make(chan struct{}, goRoutinesNum) + systemAttributesToFetch := []string{config.ObjectNameAttrName, config.MtimeSecsAttributeName, config.MtimeNSecsAttributeName, config.EncodingAttrName, config.MaxTimeAttrName} + var getItemsWorkers, getItemsTerminated, deletesTerminated int + + var getItemsWG sync.WaitGroup + getItemsErrorChan := make(chan error, numOfGetItemsRoutines) + + aggregates := a.GetSchema().PartitionSchemaInfo.Aggregates + hasServerSideAggregations := len(aggregates) != 1 || aggregates[0] != "" + + var aggrMask aggregate.AggrType + var err error + if hasServerSideAggregations { + aggrMask, _, err = aggregate.AggregatesFromStringListWithCount(aggregates) + if err != nil { + return err + } + } + + for i := 0; i <= a.cfg.Workers; i++ { + go deleteObjectWorker(a.container, deleteParams, a.logger, + fileToDeleteChan, deleteTerminationChan, onErrorTerminationChannel, + aggrMask) + } + + for _, part := range partitions { + partitionEntirelyInRange := deleteParams.From <= part.GetStartTime() && deleteParams.To >= part.GetEndTime() + deleteEntirePartitionFolder := partitionEntirelyInRange && deleteWholePartition + + // Delete all files in partition folder and then delete the folder itself + if deleteEntirePartitionFolder { + a.logger.Info("Deleting entire partition '%s'.", part.GetTablePath()) + + getItemsWG.Add(1) + go deleteEntirePartition(a.logger, a.container, part.GetTablePath(), a.cfg.QryWorkers, + &getItemsWG, getItemsErrorChan) + + entirelyDeletedPartitions = append(entirelyDeletedPartitions, part) + // First get all items based on filter+metric+time range then delete what is necessary + } else { + a.logger.Info("Deleting partial partition '%s'.", part.GetTablePath()) + + start, end := deleteParams.From, deleteParams.To + + // Round the start and end times to the nearest aggregation buckets - to later on recalculate server side aggregations + if hasServerSideAggregations { + start = part.GetAggregationBucketStartTime(part.Time2Bucket(deleteParams.From)) + end = part.GetAggregationBucketEndTime(part.Time2Bucket(deleteParams.To)) + } + + var chunkAttributesToFetch []string + + // If we don't want to delete the entire object, fetch also the desired chunks to delete. 
+ if !partitionEntirelyInRange { + chunkAttributesToFetch, _ = part.Range2Attrs("v", start, end) + } + + allAttributes := append(chunkAttributesToFetch, systemAttributesToFetch...) + if len(deleteParams.Metrics) == 0 { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } else { + for _, metric := range deleteParams.Metrics { + for _, shardingKey := range part.GetShardingKeys(metric) { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter, + ShardingKey: shardingKey} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } + } + } + } + } + a.logger.Debug("issued %v getItems", getItemsWorkers) + + // Wait for the deletion of entire partitions to complete + getItemsWG.Wait() + select { + case err = <-getItemsErrorChan: + a.logger.Error("failed to delete entire partitions, err: %v", err) + // Signal all other goroutines to quit + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return err + default: + } + + if getItemsWorkers != 0 { + for deletesTerminated < a.cfg.Workers { + select { + case err := <-getItemsTerminationChan: + a.logger.Debug("finished getItems worker, total finished: %v, error: %v", getItemsTerminated+1, err) + if err != nil { + // If requested to ignore non-existing tables, do not return an error. + if !(deleteParams.IgnoreErrors && utils.IsNotExistsOrConflictError(err)) { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrap(err, "GetItems failed during recursive delete.") + } + } + getItemsTerminated++ + + if getItemsTerminated == getItemsWorkers { + close(fileToDeleteChan) + } + case err := <-deleteTerminationChan: + a.logger.Debug("finished delete worker, total finished: %v, err: %v", deletesTerminated+1, err) + if err != nil { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrap(err, "Delete failed during recursive delete.") + } + deletesTerminated++ + } + } + } else { + close(fileToDeleteChan) + } + + a.logger.Debug("finished deleting data, removing partitions from schema") + err = a.partitionMngr.DeletePartitionsFromSchema(entirelyDeletedPartitions) + if err != nil { + return err + } + + return nil +} + +func deleteEntirePartition(logger logger.Logger, container v3io.Container, partitionPath string, workers int, + wg *sync.WaitGroup, errChannel chan<- error) { + defer wg.Done() + + err := utils.DeleteTable(logger, container, partitionPath, "", workers) + if err != nil { + errChannel <- errors.Wrapf(err, "Failed to delete partition '%s'.", partitionPath) + } + // Delete the Directory object + err = container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: partitionPath}) + if err != nil { + errChannel <- errors.Wrapf(err, "Failed to delete partition folder '%s'.", partitionPath) + } +} + +func getItemsWorker(logger logger.Logger, container v3io.Container, input *v3io.GetItemsInput, partition *partmgr.DBPartition, + filesToDeleteChan chan<- v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}) { + for { + select { + case <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + logger.Debug("going to getItems for partition '%v', input: %v", partition.GetTablePath(), *input) + resp, err := container.GetItemsSync(input) + if err != nil { + terminationChan <- err + return + } + resp.Release() + output := resp.Output.(*v3io.GetItemsOutput) + + for _, item := range output.Items { + item["partition"] = partition + + // In case a delete error occurred while we iterate over the getItems response + select { + case <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + filesToDeleteChan <- item + } + if output.Last { + terminationChan <- nil + return + } + input.Marker = output.NextMarker + } +} + +func deleteObjectWorker(container v3io.Container, deleteParams *DeleteParams, logger logger.Logger, + filesToDeleteChannel <-chan v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}, + aggrMask aggregate.AggrType) { + for { + select { + case <-onErrorTerminationChannel: + return + case itemToDelete, ok := <-filesToDeleteChannel: + if !ok { + terminationChan <- nil + return + } + + currentPartition := itemToDelete.GetField("partition").(*partmgr.DBPartition) + fileName, err := itemToDelete.GetFieldString(config.ObjectNameAttrName) + if err != nil { + terminationChan <- err + return + } + fullFileName := pathUtil.Join(currentPartition.GetTablePath(), fileName) + + // Delete whole object + if deleteParams.From <= currentPartition.GetStartTime() && + deleteParams.To >= currentPartition.GetEndTime() { + + logger.Debug("delete entire item '%v' ", fullFileName) + input := &v3io.DeleteObjectInput{Path: fullFileName} + err = container.DeleteObjectSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + terminationChan <- err + return + } + // Delete partial object - specific chunks or sub-parts of chunks + } else { + mtimeSecs, err := itemToDelete.GetFieldInt(config.MtimeSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + mtimeNSecs, err := itemToDelete.GetFieldInt(config.MtimeNSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + + deleteUpdateExpression := strings.Builder{} + dataEncoding, err := getEncoding(itemToDelete) + if err != nil { + terminationChan <- err + return + } + + var aggregationsByBucket map[int]*aggregate.AggregatesList + if aggrMask != 0 { + aggregationsByBucket = make(map[int]*aggregate.AggregatesList) + aggrBuckets := currentPartition.Times2BucketRange(deleteParams.From, deleteParams.To) + for _, bucketID := range aggrBuckets { + aggregationsByBucket[bucketID] = aggregate.NewAggregatesList(aggrMask) + } + } + + var newMaxTime int64 = math.MaxInt64 + var numberOfExpressionsInUpdate int + for attributeName, value := range itemToDelete { + if strings.HasPrefix(attributeName, "_v") { + // Check whether the whole chunk attribute needs to be deleted or just part of it. 
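+ // Two outcomes are possible here, matching the expressions built below: a chunk that lies
+ // entirely inside the delete range is dropped with a "delete(<attribute>);" expression, while a
+ // partially covered chunk is decoded, filtered, re-encoded, and written back with an
+ // "<attribute>=blob('<base64>');" expression (see generatePartialChunkDeleteExpression below).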
+ if currentPartition.IsChunkInRangeByAttr(attributeName, deleteParams.From, deleteParams.To) { + deleteUpdateExpression.WriteString("delete(") + deleteUpdateExpression.WriteString(attributeName) + deleteUpdateExpression.WriteString(");") + } else { + currentChunksMaxTime, err := generatePartialChunkDeleteExpression(logger, &deleteUpdateExpression, attributeName, + value.([]byte), dataEncoding, deleteParams, currentPartition, aggregationsByBucket) + if err != nil { + terminationChan <- err + return + } + + // We want to save the earliest max time possible + if currentChunksMaxTime < newMaxTime { + newMaxTime = currentChunksMaxTime + } + } + numberOfExpressionsInUpdate++ + } + } + + dbMaxTime := int64(itemToDelete.GetField(config.MaxTimeAttrName).(int)) + + // Update the partition's max time if needed. + if deleteParams.From < dbMaxTime && deleteParams.To >= dbMaxTime { + if deleteParams.From < newMaxTime { + newMaxTime = deleteParams.From + } + + deleteUpdateExpression.WriteString(fmt.Sprintf("%v=%v;", config.MaxTimeAttrName, newMaxTime)) + } + + if deleteUpdateExpression.Len() > 0 { + // If there are server-side aggregates, update the affected buckets + if aggrMask != 0 { + for bucket, aggregations := range aggregationsByBucket { + numberOfExpressionsInUpdate = numberOfExpressionsInUpdate + len(*aggregations) + + // Due to an engine limitation, if we reach the maximum number of expressions in an UpdateItem request, + // we need to break the update into batches + // TODO: refactor in 2.8: + // in 2.8 there is a better way of doing it by uniting multiple update expressions into + // one expression by range in a form similar to `_v_sum[15...100]=0` + if numberOfExpressionsInUpdate < maxExpressionsInUpdateItem { + deleteUpdateExpression.WriteString(aggregations.SetExpr("v", bucket)) + } else { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + mtimeSecs, mtimeNSecs, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + + // Reset the accumulated expression for the next update iteration + numberOfExpressionsInUpdate = 0 + deleteUpdateExpression.Reset() + } + } + } + + // If any expressions are left, save them + if deleteUpdateExpression.Len() > 0 { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + _, _, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + } + } + } + } + } +} + +func sendUpdateItem(path, expr string, mtimeSecs, mtimeNSecs int, container v3io.Container) (int, int, error) { + condition := fmt.Sprintf("%v == %v and %v == %v", + config.MtimeSecsAttributeName, mtimeSecs, + config.MtimeNSecsAttributeName, mtimeNSecs) + + input := &v3io.UpdateItemInput{Path: path, + Expression: &expr, + Condition: condition} + + response, err := container.UpdateItemSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + returnError := err + if isFalseConditionError(err) { + returnError = errors.Wrapf(err, "Item '%v' was updated while the delete was in progress. Please disable any ingestion and retry.", path) + } + return 0, 0, returnError + } + + output := response.Output.(*v3io.UpdateItemOutput) + return output.MtimeSecs, output.MtimeNSecs, nil +} + +func getEncoding(itemToDelete v3io.Item) (chunkenc.Encoding, error) { + var encoding chunkenc.Encoding + encodingStr, ok := itemToDelete.GetField(config.EncodingAttrName).(string) + // If the encoding attribute is missing, fall back to XOR (for backwards compatibility) + if !ok { + encoding = chunkenc.EncXOR + } else { + intEncoding, err := strconv.Atoi(encodingStr) + if err != nil { + return 0, fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) + } + encoding = chunkenc.Encoding(intEncoding) + } + + return encoding, nil +} + +func generatePartialChunkDeleteExpression(logger logger.Logger, expr *strings.Builder, + attributeName string, value []byte, encoding chunkenc.Encoding, deleteParams *DeleteParams, + partition *partmgr.DBPartition, aggregationsByBucket map[int]*aggregate.AggregatesList) (int64, error) { + chunk, err := chunkenc.FromData(logger, encoding, value, 0) + if err != nil { + return 0, err + } + + newChunk := chunkenc.NewChunk(logger, encoding == chunkenc.EncVariant) + appender, err := newChunk.Appender() + if err != nil { + return 0, err + } + + var currentMaxTime int64 + var remainingItemsCount int + iter := chunk.Iterator() + for iter.Next() { + var t int64 + var v interface{} + if encoding == chunkenc.EncXOR { + t, v = iter.At() + } else { + t, v = iter.AtString() + } + + // Append back only events that are not in the delete range + if t < deleteParams.From || t > deleteParams.To { + remainingItemsCount++ + appender.Append(t, v) + + // Calculate server-side aggregations + if aggregationsByBucket != nil { + currentAgg, ok := aggregationsByBucket[partition.Time2Bucket(t)] + // A chunk may contain more data than is needed for the aggregations; in that case, do not aggregate + if ok { + currentAgg.Aggregate(t, v) + } + } + + // Update current chunk's new max time + if t > currentMaxTime { + currentMaxTime = t + } + } + } + + if remainingItemsCount == 0 { + expr.WriteString("delete(") + expr.WriteString(attributeName) + expr.WriteString(");") + currentMaxTime, _ = partition.GetChunkStartTimeByAttr(attributeName) + } else { + bytes := appender.Chunk().Bytes() + val := base64.StdEncoding.EncodeToString(bytes) + + expr.WriteString(fmt.Sprintf("%s=blob('%s'); ", attributeName, val)) + } + + return currentMaxTime, nil +} + // Return the number of items in a TSDB table func (a *V3ioAdapter) CountMetrics(part string) (int, error) { count := 0 @@ -360,3 +829,16 @@ type Appender interface { Rollback() error Close() } + +// Check if the current error was caused specifically because the condition was evaluated to false. 
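+// Such a failure surfaces as a wrapped error whose message embeds both an outer and an inner
+// engine error code, so we expect the "ErrorCode" marker to appear exactly twice, together with
+// both of the (engine-internal) code values defined in the const block above.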
+func isFalseConditionError(err error) bool { + errString := err.Error() + + if strings.Count(errString, errorCodeString) == 2 && + strings.Contains(errString, falseConditionOuterErrorCode) && + strings.Contains(errString, falseConditionInnerErrorCode) { + return true + } + + return false +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go index db244d2f..18aac56d 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go @@ -24,20 +24,14 @@ package tsdb_test import ( "encoding/json" - "fmt" - "math" - "path" "sort" - "strings" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" - "github.com/v3io/v3io-tsdb/pkg/partmgr" . "github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" @@ -47,7 +41,6 @@ import ( const defaultStepMs = 5 * tsdbtest.MinuteInMillis // 5 minutes func TestIngestData(t *testing.T) { - timestamp := fmt.Sprintf("%d", time.Now().Unix()) //time.Now().Format(time.RFC3339) testCases := []struct { desc string params tsdbtest.TestParams @@ -105,46 +98,6 @@ func TestIngestData(t *testing.T) { }}}, ), }, - {desc: "Should drop values of incompatible data types (prepare data for: IG-13146)", - params: tsdbtest.NewTestParams(t, - tsdbtest.TestOption{ - Key: tsdbtest.OptTimeSeries, - Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "IG13146", - Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), - Data: []tsdbtest.DataPoint{ - {Time: 15, Value: 0.1}, // first add float value - {Time: 20, Value: "some string value"}, // then attempt to add string value - {Time: 30, Value: 0.2}, // and finally add another float value - }, - ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), - }}}, - tsdbtest.TestOption{ - Key: "override_test_name", - Value: fmt.Sprintf("IG-13146-%s", timestamp)}), - }, - {desc: "IG-13146: Should reject values of incompatible data types without data corruption", - params: tsdbtest.NewTestParams(t, - tsdbtest.TestOption{ - Key: tsdbtest.OptTimeSeries, - Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "IG13146", - Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), - Data: []tsdbtest.DataPoint{ - {Time: 50, Value: "another string value"}, // then attempt to add string value - {Time: 60, Value: 0.4}, // valid values from this batch will be dropped - {Time: 70, Value: 0.3}, // because processing of entire batch will stop - }, - ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), - }}}, - tsdbtest.TestOption{ - Key: "override_test_name", - Value: fmt.Sprintf("IG-13146-%s", timestamp)}, - tsdbtest.TestOption{ - Key: "expected_error_contains_string", - // Note, the expected error message should align with pkg/appender/ingest.go:308 - Value: "trying to ingest values of incompatible data type"}), - }, } for _, test := range testCases { @@ -187,26 +140,13 @@ func testIngestDataCase(t *testing.T, testParams tsdbtest.TestParams) { } if _, err := appender.WaitForCompletion(0); err != nil { - if !isExpected(testParams, err) { - t.Fatalf("Failed to wait for 
appender completion. reason: %s", err) - } + t.Fatalf("Failed to wait for appender completion. reason: %s", err) } - expectedCount := len(dp.Data) - if dp.ExpectedCount != nil { - expectedCount = *dp.ExpectedCount - } - tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, expectedCount, from, to, -1) + tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, len(dp.Data), from, to, -1) } } -func isExpected(testParams tsdbtest.TestParams, actualErr error) bool { - if errMsg, ok := testParams["expected_error_contains_string"]; ok { - return strings.Contains(actualErr.Error(), fmt.Sprintf("%v", errMsg)) - } - return false -} - func TestIngestDataWithSameTimestamp(t *testing.T) { baseTime := int64(1532209200000) testParams := tsdbtest.NewTestParams(t, @@ -615,18 +555,8 @@ func testQueryDataCase(test *testing.T, testParams tsdbtest.TestParams, filter s if err != nil { test.Fatal(err) } - - for _, data := range expected[currentAggregate] { - var equalCount = 0 - for _, dp := range actual { - if dp.Equals(data) { - equalCount++ - continue - } - } - assert.Equal(test, equalCount, len(expected[currentAggregate]), - "Check failed for aggregate='%s'. Query aggregates: %s", currentAggregate, queryAggregates) - } + assert.ElementsMatch(test, expected[currentAggregate], actual, + "Check failed for aggregate='%s'. Query aggregates: %s", currentAggregate, queryAggregates) } if set.Err() != nil { @@ -752,14 +682,7 @@ func testQueryDataOverlappingWindowCase(test *testing.T, v3ioConfig *config.V3io } assert.EqualValues(test, len(windows), len(actual)) for _, data := range expected[agg] { - var equalCount = 0 - for _, dp := range actual { - if dp.Equals(data) { - equalCount++ - continue - } - } - assert.Equal(test, equalCount, len(expected[agg])) + assert.Contains(test, actual, data) } } @@ -840,17 +763,7 @@ func TestIgnoreNaNWhenSeekingAggSeries(t *testing.T) { } actual = append(actual, tsdbtest.DataPoint{Time: t1, Value: v1}) } - - for _, data := range expected[agg] { - var equalCount = 0 - for _, dp := range actual { - if dp.Equals(data) { - equalCount++ - continue - } - } - assert.Equal(t, equalCount, len(expected[agg])) - } + assert.ElementsMatch(t, expected[agg], actual) } if set.Err() != nil { @@ -923,8 +836,7 @@ func TestDeleteTSDB(t *testing.T) { t.Fatal(res.Error.Error()) } - now := time.Now().Unix() * 1000 // now time in millis - if err := adapter.DeleteDB(true, true, 0, now); err != nil { + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { t.Fatalf("Failed to delete DB on teardown. 
reason: %s", err) } @@ -937,269 +849,6 @@ func TestDeleteTSDB(t *testing.T) { } } -func TestDeleteTable(t *testing.T) { - ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") - t1 := ta.Unix() * 1000 - tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") - t2 := tb.Unix() * 1000 - tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") - t3 := tc.Unix() * 1000 - td, _ := time.Parse(time.RFC3339, "now + 1w") - futurePoint := td.Unix() * 1000 - - testCases := []struct { - desc string - deleteFrom int64 - deleteTo int64 - deleteAll bool - ignoreErrors bool - data []tsdbtest.DataPoint - expected []tsdbtest.DataPoint - ignoreReason string - }{ - {desc: "Should delete all table by time", - deleteFrom: 0, - deleteTo: 9999999999999, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{}, - }, - {desc: "Should delete all table by deleteAll", - deleteFrom: 0, - deleteTo: 0, - deleteAll: true, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}, - {Time: futurePoint, Value: 555.5}}, - expected: []tsdbtest.DataPoint{}, - }, - {desc: "Should skip partial partition at begining", - deleteFrom: t1 - 10000, - deleteTo: 9999999999999, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}}, - }, - {desc: "Should skip partial partition at end", - deleteFrom: 0, - deleteTo: t3 + 10000, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t3, Value: 444.4}}, - }, - {desc: "Should skip partial partition at beginning and end not in range", - deleteFrom: t1 + 10000, - deleteTo: t3 - 10000, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t3, Value: 444.4}}, - }, - {desc: "Should skip partial partition at beginning and end although in range", - deleteFrom: t1 - 10000, - deleteTo: t3 + 10000, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t3, Value: 444.4}}, - }, - } - - for _, test := range testCases { - t.Run(test.desc, func(t *testing.T) { - if test.ignoreReason != "" { - t.Skip(test.ignoreReason) - } - testDeleteTSDBCase(t, - tsdbtest.NewTestParams(t, - tsdbtest.TestOption{ - Key: tsdbtest.OptDropTableOnTearDown, - Value: !test.deleteAll}, - tsdbtest.TestOption{ - Key: tsdbtest.OptTimeSeries, - Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "metricToDelete", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: test.data, - }}}, - ), - test.deleteFrom, test.deleteTo, test.ignoreErrors, test.deleteAll, test.expected) - }) - } -} - -func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteFrom int64, deleteTo int64, ignoreErrors bool, deleteAll bool, - expected []tsdbtest.DataPoint) { - - adapter, teardown := tsdbtest.SetUpWithData(test, testParams) - defer teardown() - - container, err := 
utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HttpTimeout) - if err != nil { - test.Fatalf("failed to create new container. reason: %s", err) - } - pm, err := partmgr.NewPartitionMngr(adapter.GetSchema(), container, testParams.V3ioConfig()) - if err != nil { - test.Fatalf("Failed to create new partition manager. reason: %s", err) - } - - initiaPartitions := pm.PartsForRange(0, math.MaxInt64, true) - initialNumberOfPartitions := len(initiaPartitions) - - partitionsToDelete := pm.PartsForRange(deleteFrom, deleteTo, false) - - if err := adapter.DeleteDB(deleteAll, ignoreErrors, deleteFrom, deleteTo); err != nil { - test.Fatalf("Failed to delete DB. reason: %s", err) - } - - if !deleteAll { - pm1, err := partmgr.NewPartitionMngr(adapter.GetSchema(), container, testParams.V3ioConfig()) - remainingParts := pm1.PartsForRange(0, math.MaxInt64, false) - assert.Equal(test, len(remainingParts), initialNumberOfPartitions-len(partitionsToDelete)) - - qry, err := adapter.Querier(nil, 0, math.MaxInt64) - if err != nil { - test.Fatalf("Failed to create Querier. reason: %v", err) - } - - for _, metric := range testParams.TimeSeries() { - set, err := qry.Select(metric.Name, "", 0, "") - if err != nil { - test.Fatalf("Failed to run Select. reason: %v", err) - } - - set.Next() - if set.Err() != nil { - test.Fatalf("Failed to query metric. reason: %v", set.Err()) - } - - series := set.At() - if series == nil && len(expected) == 0 { - //table is expected to be empty - } else if series != nil { - iter := series.Iterator() - if iter.Err() != nil { - test.Fatalf("Failed to query data series. reason: %v", iter.Err()) - } - - actual, err := iteratorToSlice(iter) - if err != nil { - test.Fatal(err) - } - assert.ElementsMatch(test, expected, actual) - } else { - test.Fatalf("Result series is empty while expected result set is not!") - } - } - } else { - container, tablePath := adapter.GetContainer() - tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) - - // Validate: schema does not exist - _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) - if err != nil { - if utils.IsNotExistsError(err) { - // OK - expected - } else { - test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) - } - } - - // Validate: table does not exist - _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) - if err != nil { - if utils.IsNotExistsError(err) { - // OK - expected - } else { - test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) - } - } - } -} - -func TestIngestDataFloatThenString(t *testing.T) { - testParams := tsdbtest.NewTestParams(t) - - defer tsdbtest.SetUp(t, testParams)() - - adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) - if err != nil { - t.Fatalf("Failed to create v3io adapter. reason: %s", err) - } - - appender, err := adapter.Appender() - if err != nil { - t.Fatalf("Failed to get appender. reason: %s", err) - } - - labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} - _, err = appender.Add(labels, 1532940510000, 12.0) - if err != nil { - t.Fatalf("Failed to add data to appender. reason: %s", err) - } - - _, err = appender.Add(labels, 1532940610000, "tal") - if err == nil { - t.Fatal("expected failure but finished successfully") - } - - if _, err := appender.WaitForCompletion(0); err != nil { - t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) - } - - tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) -} - -func TestIngestDataStringThenFloat(t *testing.T) { - testParams := tsdbtest.NewTestParams(t) - - defer tsdbtest.SetUp(t, testParams)() - - adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) - if err != nil { - t.Fatalf("Failed to create v3io adapter. reason: %s", err) - } - - appender, err := adapter.Appender() - if err != nil { - t.Fatalf("Failed to get appender. reason: %s", err) - } - - labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} - _, err = appender.Add(labels, 1532940510000, "tal") - if err != nil { - t.Fatalf("Failed to add data to appender. reason: %s", err) - } - - _, err = appender.Add(labels, 1532940610000, 666.0) - if err == nil { - t.Fatal("expected failure but finished successfully") - } - - if _, err := appender.WaitForCompletion(0); err != nil { - t.Fatalf("Failed to wait for appender completion. reason: %s", err) - } - - tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) -} - func iteratorToSlice(it chunkenc.Iterator) ([]tsdbtest.DataPoint, error) { var result []tsdbtest.DataPoint for it.Next() { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go index dbda9752..9d33c58a 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go @@ -30,17 +30,21 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/utils" ) type delCommandeer struct { cmd *cobra.Command rootCommandeer *RootCommandeer - deleteAll bool - ignoreErrors bool force bool - fromTime string - toTime string + + deleteAll bool + ignoreErrors bool + fromTime string + toTime string + filter string + metrics string } func newDeleteCommandeer(rootCommandeer *RootCommandeer) *delCommandeer { @@ -66,6 +70,9 @@ Notes: metric items with older or newer times. Use the info command to view the partitioning interval.`, RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + return errors.New("delete does not accept unnamed arguments. Did you forget to use a flag?") + } // Initialize parameters return commandeer.delete() }, @@ -81,6 +88,10 @@ Notes: "End (maximum) time for the delete operation, as a string containing an\nRFC 3339 time string, a Unix timestamp in milliseconds, or a relative\ntime of the format \"now\" or \"now-[0-9]+[mhd]\" (where 'm' = minutes,\n'h' = hours, and 'd' = days). Examples: \"2018-09-26T14:10:20Z\";\n\"1537971006000\"; \"now-3h\"; \"now-7d\". (default \"now\")") cmd.Flags().StringVarP(&commandeer.fromTime, "begin", "b", "", "Start (minimum) time for the delete operation, as a string containing\nan RFC 3339 time, a Unix timestamp in milliseconds, a relative time of\nthe format \"now\" or \"now-[0-9]+[mhd]\" (where 'm' = minutes, 'h' = hours,\nand 'd' = days), or 0 for the earliest time. Examples:\n\"2016-01-02T15:34:26Z\"; \"1451748866\"; \"now-90m\"; \"0\". (default =\n - 1h)") + cmd.Flags().StringVar(&commandeer.filter, "filter", "", + "Query filter, as an Iguazio Data Science Platform\nfilter expression. \nExamples: \"method=='get'\"; \"method=='get' AND os=='win'\".") + cmd.Flags().StringVarP(&commandeer.metrics, "metrics", "m", "", + "Comma-separated list of metric names to delete. 
If you don't set this argument, all metrics will be deleted according to the time range and filter specified.") commandeer.cmd = cmd return commandeer @@ -128,7 +139,20 @@ func (dc *delCommandeer) delete() error { } } - err = dc.rootCommandeer.adapter.DeleteDB(dc.deleteAll, dc.ignoreErrors, from, to) + var metricsToDelete []string + if dc.metrics != "" { + for _, m := range strings.Split(dc.metrics, ",") { + metricsToDelete = append(metricsToDelete, strings.TrimSpace(m)) + } + } + + params := tsdb.DeleteParams{DeleteAll: dc.deleteAll, + IgnoreErrors: dc.ignoreErrors, + From: from, + To: to, + Metrics: metricsToDelete, + Filter: dc.filter} + err = dc.rootCommandeer.adapter.DeleteDB(params) if err != nil { return errors.Wrapf(err, "Failed to delete %s TSDB table '%s' in container '%s'.", partialMsg, dc.rootCommandeer.v3iocfg.TablePath, dc.rootCommandeer.v3iocfg.Container) } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go index e7f32bbb..cf34e8db 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go @@ -37,21 +37,16 @@ func IsNotExistsError(err error) bool { return false } -const ( - errorCodeString = "ErrorCode" - falseConditionOuterErrorCode = "16777244" - falseConditionInnerErrorCode = "16777245" -) - -// Check if the current error was caused specifically because the condition was evaluated to false. -func IsFalseConditionError(err error) bool { - errString := err.Error() - - if strings.Count(errString, errorCodeString) == 2 && - strings.Contains(errString, falseConditionOuterErrorCode) && - strings.Contains(errString, falseConditionInnerErrorCode) { +func IsNotExistsOrConflictError(err error) bool { + errorWithStatusCode, ok := err.(v3ioerrors.ErrorWithStatusCode) + if !ok { + // error of different type + return false + } + statusCode := errorWithStatusCode.StatusCode() + // Ignore 404s and 409s + if statusCode == http.StatusNotFound || statusCode == http.StatusConflict { return true } - return false } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go index 694b5b2a..dc0fa165 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go @@ -157,19 +157,7 @@ func BenchmarkIngest(b *testing.B) { } func isValidDataPoint(prev, current *tsdbtest.DataPoint) bool { - if current.Time > prev.Time { - switch cv := current.Value.(type) { - case float64: - if pv, ok := prev.Value.(float64); ok { - return int64(cv)-int64(pv) == 1 - } - case string: - return true - default: - return false - } - } - return false + return int64(current.Value)-int64(prev.Value) == 1 && current.Time > prev.Time } func runTest( diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go deleted file mode 100644 index dc70312e..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go +++ /dev/null @@ -1,285 +0,0 @@ -// Package errors provides an api similar to github.com/nuclio/nuclio/pkg/errors -// However we don't carry stack trace around for performance -// (see 
https://github.com/pkg/errors/issues/124) -package errors - -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported -// -// %s print the error -// %+v extended format. Will print stack trace of errors - -import ( - "bytes" - "fmt" - "io" - "os" - "runtime" - "strings" -) - -var ( - // ShowLineInfo sets if we collect location information (file, line) - // (getting location information makes creating error slower ~550ns vs 2ns) - ShowLineInfo bool -) - -// Error implements error interface with call stack -type Error struct { - message string - cause error - fileName string - lineNumber int -} - -func init() { - ShowLineInfo = len(os.Getenv("NUCLIO_NO_ERROR_LINE_INFO")) == 0 -} - -// caller return the caller informatin (file, line) -// Note this is sensitive to where it's called -func caller() (string, int) { - pcs := make([]uintptr, 1) - // skip 3 levels to get to the caller - n := runtime.Callers(3, pcs) - if n == 0 { - return "", 0 - } - - pc := pcs[0] - 1 - fn := runtime.FuncForPC(pc) - if fn == nil { - return "", 0 - } - - return fn.FileLine(pc) -} - -// New returns a new error -func New(message string) error { - err := &Error{message: message} - if ShowLineInfo { - err.fileName, err.lineNumber = caller() - } - return err -} - -// Errorf returns a new Error -func Errorf(format string, args ...interface{}) error { - err := &Error{message: fmt.Sprintf(format, args...)} - if ShowLineInfo { - err.fileName, err.lineNumber = caller() - } - return err -} - -// Wrap returns a new error with err as cause, if err is nil will return nil -func Wrap(err error, message string) error { - if err == nil { - return nil - } - - errObj := &Error{ - message: message, - cause: err, - } - - if ShowLineInfo { - errObj.fileName, errObj.lineNumber = caller() - } - return errObj -} - -// Wrapf returns a new error with err as cause, if err is nil will return nil -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - - message := fmt.Sprintf(format, args...) 
- errObj := &Error{ - message: message, - cause: err, - } - if ShowLineInfo { - errObj.fileName, errObj.lineNumber = caller() - } - return errObj -} - -// Error is the string representation of the error -func (err *Error) Error() string { - return err.message -} - -// Cause returns the cause of the error -func (err *Error) Cause() error { - return err.cause -} - -func asError(err error) *Error { - errObj, ok := err.(*Error) - if !ok { - return nil - } - return errObj -} - -// LineInfo info returns the location (file, line) where the error was created -func (err *Error) LineInfo() (string, int) { - return err.fileName, err.lineNumber -} - -// reverse reverses a slice in place -func reverse(slice []error) { - for left, right := 0, len(slice)-1; left < right; left, right = left+1, right-1 { - slice[left], slice[right] = slice[right], slice[left] - } -} - -// GetErrorStack return stack of messges (oldest on top) -// if n == -1 returns the whole stack -func GetErrorStack(err error, depth int) []error { - errors := []error{err} - - errObj := asError(err) - if errObj == nil { - return errors - } - - for errObj = asError(errObj.cause); errObj != nil; errObj = asError(errObj.cause) { - errors = append(errors, errObj) - } - - reverse(errors) - if depth > 0 { - if depth > len(errors) { - depth = len(errors) - } - errors = errors[:depth] - } - return errors -} - -// GetErrorStackString returns the error stack as a string -func GetErrorStackString(err error, depth int) string { - buffer := bytes.Buffer{} - - PrintErrorStack(&buffer, err, depth) - - return buffer.String() -} - -// PrintErrorStack prints the error stack into out up to depth levels -// If n == 1 then prints the whole stack -func PrintErrorStack(out io.Writer, err error, depth int) { - if err == nil { - return - } - - pathLen := 40 - - stack := GetErrorStack(err, depth) - errObj := asError(stack[0]) - - if errObj != nil && errObj.lineNumber != 0 { - cause := errObj.Error() - if errObj.cause != nil { - cause = errObj.cause.Error() - } - - fmt.Fprintf(out, "\nError - %s", cause) // nolint: errcheck - fmt.Fprintf(out, "\n %s:%d\n", trimPath(errObj.fileName, pathLen), errObj.lineNumber) // nolint: errcheck - } else { - fmt.Fprintf(out, "\nError - %s", stack[0].Error()) // nolint: errcheck - } - - fmt.Fprintf(out, "\nCall stack:") // nolint: errcheck - - for _, e := range stack { - errObj := asError(e) - fmt.Fprintf(out, "\n%s", e.Error()) // nolint: errcheck - if errObj != nil && errObj.lineNumber != 0 { - fmt.Fprintf(out, "\n %s:%d", trimPath(errObj.fileName, pathLen), errObj.lineNumber) // nolint: errcheck - } - } - - out.Write([]byte{'\n'}) // nolint: errcheck -} - -// Cause is the cause of the error -func Cause(err error) error { - var cause error - - if err == nil { - return nil - } - - errAsError := asError(err) - if errAsError != nil { - cause = errAsError.cause - } - - // treat the err as simply an error - if cause == nil { - cause = err - } - - return cause -} - -// RootCause is the cause of the error -func RootCause(err error) error { - currentErr := err - for { - cause := Cause(currentErr) - - // if there's a cause go deeper - if cause == nil || cause == currentErr { - break - } - - currentErr = cause - } - - return currentErr -} - -// sumLengths return sum of lengths of strings -func sumLengths(parts []string) int { - total := 0 - for _, s := range parts { - total += len(s) - } - return total -} - -// trimPath shortens fileName to be at most size characters -func trimPath(fileName string, size int) string { - if len(fileName) <= 
size { - return fileName - } - - // We'd like to cut at directory boundary - parts := strings.Split(fileName, "/") - for sumLengths(parts) > size && len(parts) > 1 { - parts = parts[1:] - } - - return ".../" + strings.Join(parts, "/") -} - -// Format formats an error -func (err *Error) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - PrintErrorStack(s, err, -1) - } - fallthrough - case 's': - fmt.Fprintf(s, err.Error()) // nolint: errcheck - case 'q': - fmt.Fprintf(s, "%q", err.Error()) // nolint: errcheck - } -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod deleted file mode 100644 index c242fb09..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/nuclio/errors - -go 1.12 diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.sum b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.sum deleted file mode 100644 index e69de29b..00000000 diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore deleted file mode 100644 index 485dee64..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml deleted file mode 100644 index 8d528872..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go -go_import_path: github.com/nuclio/nuclio-sdk-go -go: - - "1.10" - - "1.9" -script: make test diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md deleted file mode 100644 index bda4d560..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md +++ /dev/null @@ -1,6 +0,0 @@ -# Hacking on nuclio-sdk - -## errors.go - -`errors.go` is automatically generated. If you bump Go version or suspect there -might be changes, run `go generate` to generate it. diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
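
A note before the remaining nuclio-sdk-go files: the vendored nuclio error packages can be dropped because the TSDB no longer string-matches v3io error-code substrings. The rewritten IsNotExistsOrConflictError in pkg/utils/misc.go (shown earlier in this patch) instead type-asserts the error and compares HTTP status codes. Below is a minimal, self-contained Go sketch of that pattern; the statusCoded interface and httpError type are stand-ins invented for illustration only, where the real code asserts v3io-go's v3ioerrors.ErrorWithStatusCode.

    package main

    import (
    	"errors"
    	"fmt"
    	"net/http"
    )

    // statusCoded stands in for v3io-go's v3ioerrors.ErrorWithStatusCode:
    // any error that can report the HTTP status code it was built from.
    type statusCoded interface {
    	error
    	StatusCode() int
    }

    // httpError is a hypothetical implementation, used only for this sketch.
    type httpError struct {
    	msg  string
    	code int
    }

    func (e httpError) Error() string   { return e.msg }
    func (e httpError) StatusCode() int { return e.code }

    // isNotExistsOrConflict mirrors the new helper: rather than counting
    // "ErrorCode" substrings in the message, it type-asserts the error and
    // treats 404 (not found) and 409 (conflict) as ignorable.
    func isNotExistsOrConflict(err error) bool {
    	sc, ok := err.(statusCoded)
    	if !ok {
    		return false // an error of a different type
    	}
    	code := sc.StatusCode()
    	return code == http.StatusNotFound || code == http.StatusConflict
    }

    func main() {
    	fmt.Println(isNotExistsOrConflict(httpError{"no such table", http.StatusNotFound}))   // true
    	fmt.Println(isNotExistsOrConflict(httpError{"update conflict", http.StatusConflict})) // true
    	fmt.Println(isNotExistsOrConflict(errors.New("request timed out")))                   // false
    }

Comparing status codes is robust to changes in server error-message wording, which the old 16777244/16777245 substring checks were not.
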
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile deleted file mode 100644 index c5f3b06c..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2017 The Nuclio Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -test: lint - go test -v . - -lint: deps - go get -u github.com/pavius/impi/cmd/impi - go get -u gopkg.in/alecthomas/gometalinter.v2 - @$(GOPATH)/bin/gometalinter.v2 --install - @echo Verifying imports... - $(GOPATH)/bin/impi \ - --local github.com/nuclio/nuclio/ \ - --scheme stdLocalThirdParty \ - ./... - @echo Linting... - @$(GOPATH)/bin/gometalinter.v2 \ - --deadline=300s \ - --disable-all \ - --enable-gc \ - --enable=deadcode \ - --enable=goconst \ - --enable=gofmt \ - --enable=golint \ - --enable=gosimple \ - --enable=ineffassign \ - --enable=interfacer \ - --enable=misspell \ - --enable=staticcheck \ - --enable=staticcheck \ - --enable=unconvert \ - --enable=varcheck \ - --enable=vet \ - --enable=vetshadow \ - --exclude="_test.go" \ - --exclude="comment on" \ - --exclude="error should be the last" \ - --exclude="should have comment" \ - . - - @echo Done. - -deps: - go get -u github.com/nuclio/logger diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md deleted file mode 100644 index e5c53649..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Go SDK for nuclio - -To get started with nuclio, see https://github.com/nuclio/nuclio. \ No newline at end of file diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go deleted file mode 100644 index 83bb405c..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nuclio - -import "github.com/nuclio/logger" - -// Context holds objects whose lifetime is that of the function instance -type Context struct { - - // Logger allows submitting information to logger sinks configured in the platform - Logger logger.Logger - - // DataBinding holds a map of . For example, if the user - // configured the function to bind to an Azure Event Hub, it will hold an instance of an Event Hub - // client. The user can type cast this to the client type - DataBinding map[string]DataBinding - - // WorkerID holds the unique identifier of the worker currently handling the event. It can be used - // to key into shared datasets to prevent locking - WorkerID int - - // UserData is nil by default. This holds information set by the user should he need access to long - // living data. The lifetime of this pointer is that of the _worker_ and workers can come and go. - // Treat this like cache - always check if it's nil prior to access and re-populate if necessary - UserData interface{} - - // FunctionName holds the name of the function currently running - FunctionName string - - // FunctionVersion holds the version of the function currently running - FunctionVersion int - - // TriggerName holds the information about the invoking trigger in this context - TriggerName string -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go deleted file mode 100644 index 6241bdce..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -// DataBinding defines a generic interface to data sources configured in the function. For the time being -// there is no "abstract" data interface and user will cast this to the specific data source client -type DataBinding interface{} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go deleted file mode 100644 index d10468fb..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nuclio - -/* -SDK for working with Nuclio - -See README.md for more details. -*/ diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go deleted file mode 100644 index 05e7894c..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//go:generate go run gen_errors.go -//go:generate go fmt errors.go - -package nuclio - -// This file exists only to generate errors.go -// To do that - run: go generate diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go deleted file mode 100644 index 5de57ca3..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go +++ /dev/null @@ -1,1223 +0,0 @@ -// Automatically generated by gen_errors.go - -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nuclio - -import ( - "errors" - "fmt" - "net/http" -) - -// WithStatusCode is an error with status code -type WithStatusCode interface { - StatusCode() int -} - -// ErrorWithStatusCode implements both error and WithStatusCode -type ErrorWithStatusCode struct { - error - statusCode int -} - -// GetError returns the underlying error -func (e *ErrorWithStatusCode) GetError() error { - return e.error -} - -// StatusCode returns the status code -func (e *ErrorWithStatusCode) StatusCode() int { - return e.statusCode -} - -// Error returns the error message -func (e ErrorWithStatusCode) Error() string { - if e.error != nil { - return e.error.Error() - } - - message, ok := defaultMessages[e.statusCode] - if !ok { - message = fmt.Sprintf("Unknown error: %d", e.statusCode) - } - - return message -} - -// ErrAccepted is a StatusAccepted Error -var ErrAccepted = ErrorWithStatusCode{statusCode: http.StatusAccepted} - -// NewErrAccepted returns a new ErrAccepted with custom error message -func NewErrAccepted(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusAccepted, - } -} - -// WrapErrAccepted returns a new ErrAccepted, wrapping an existing error -func WrapErrAccepted(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusAccepted, - } -} - -// ErrAlreadyReported is a StatusAlreadyReported Error -var ErrAlreadyReported = ErrorWithStatusCode{statusCode: http.StatusAlreadyReported} - -// NewErrAlreadyReported returns a new ErrAlreadyReported with custom error message -func NewErrAlreadyReported(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusAlreadyReported, - } -} - -// WrapErrAlreadyReported returns a new ErrAlreadyReported, wrapping an existing error -func WrapErrAlreadyReported(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusAlreadyReported, - } -} - -// ErrBadGateway is a StatusBadGateway Error -var ErrBadGateway = ErrorWithStatusCode{statusCode: http.StatusBadGateway} - -// NewErrBadGateway returns a new ErrBadGateway with custom error message -func NewErrBadGateway(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusBadGateway, - } -} - -// WrapErrBadGateway returns a new ErrBadGateway, wrapping an existing error -func WrapErrBadGateway(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusBadGateway, - } -} - -// ErrBadRequest is a StatusBadRequest Error -var ErrBadRequest = ErrorWithStatusCode{statusCode: http.StatusBadRequest} - -// NewErrBadRequest returns a new ErrBadRequest with custom error message -func NewErrBadRequest(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusBadRequest, - } -} - -// WrapErrBadRequest returns a new ErrBadRequest, wrapping an existing error -func WrapErrBadRequest(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusBadRequest, - } -} - -// ErrConflict is a StatusConflict Error -var ErrConflict = ErrorWithStatusCode{statusCode: http.StatusConflict} - -// NewErrConflict returns a new ErrConflict with custom error message -func NewErrConflict(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusConflict, - } -} - -// WrapErrConflict returns a new ErrConflict, wrapping an existing error -func WrapErrConflict(err 
error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusConflict, - } -} - -// ErrContinue is a StatusContinue Error -var ErrContinue = ErrorWithStatusCode{statusCode: http.StatusContinue} - -// NewErrContinue returns a new ErrContinue with custom error message -func NewErrContinue(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusContinue, - } -} - -// WrapErrContinue returns a new ErrContinue, wrapping an existing error -func WrapErrContinue(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusContinue, - } -} - -// ErrCreated is a StatusCreated Error -var ErrCreated = ErrorWithStatusCode{statusCode: http.StatusCreated} - -// NewErrCreated returns a new ErrCreated with custom error message -func NewErrCreated(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusCreated, - } -} - -// WrapErrCreated returns a new ErrCreated, wrapping an existing error -func WrapErrCreated(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusCreated, - } -} - -// ErrExpectationFailed is a StatusExpectationFailed Error -var ErrExpectationFailed = ErrorWithStatusCode{statusCode: http.StatusExpectationFailed} - -// NewErrExpectationFailed returns a new ErrExpectationFailed with custom error message -func NewErrExpectationFailed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusExpectationFailed, - } -} - -// WrapErrExpectationFailed returns a new ErrExpectationFailed, wrapping an existing error -func WrapErrExpectationFailed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusExpectationFailed, - } -} - -// ErrFailedDependency is a StatusFailedDependency Error -var ErrFailedDependency = ErrorWithStatusCode{statusCode: http.StatusFailedDependency} - -// NewErrFailedDependency returns a new ErrFailedDependency with custom error message -func NewErrFailedDependency(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusFailedDependency, - } -} - -// WrapErrFailedDependency returns a new ErrFailedDependency, wrapping an existing error -func WrapErrFailedDependency(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusFailedDependency, - } -} - -// ErrForbidden is a StatusForbidden Error -var ErrForbidden = ErrorWithStatusCode{statusCode: http.StatusForbidden} - -// NewErrForbidden returns a new ErrForbidden with custom error message -func NewErrForbidden(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusForbidden, - } -} - -// WrapErrForbidden returns a new ErrForbidden, wrapping an existing error -func WrapErrForbidden(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusForbidden, - } -} - -// ErrFound is a StatusFound Error -var ErrFound = ErrorWithStatusCode{statusCode: http.StatusFound} - -// NewErrFound returns a new ErrFound with custom error message -func NewErrFound(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusFound, - } -} - -// WrapErrFound returns a new ErrFound, wrapping an existing error -func WrapErrFound(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusFound, - } -} - -// ErrGatewayTimeout is a 
StatusGatewayTimeout Error -var ErrGatewayTimeout = ErrorWithStatusCode{statusCode: http.StatusGatewayTimeout} - -// NewErrGatewayTimeout returns a new ErrGatewayTimeout with custom error message -func NewErrGatewayTimeout(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusGatewayTimeout, - } -} - -// WrapErrGatewayTimeout returns a new ErrGatewayTimeout, wrapping an existing error -func WrapErrGatewayTimeout(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusGatewayTimeout, - } -} - -// ErrGone is a StatusGone Error -var ErrGone = ErrorWithStatusCode{statusCode: http.StatusGone} - -// NewErrGone returns a new ErrGone with custom error message -func NewErrGone(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusGone, - } -} - -// WrapErrGone returns a new ErrGone, wrapping an existing error -func WrapErrGone(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusGone, - } -} - -// ErrHTTPVersionNotSupported is a StatusHTTPVersionNotSupported Error -var ErrHTTPVersionNotSupported = ErrorWithStatusCode{statusCode: http.StatusHTTPVersionNotSupported} - -// NewErrHTTPVersionNotSupported returns a new ErrHTTPVersionNotSupported with custom error message -func NewErrHTTPVersionNotSupported(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusHTTPVersionNotSupported, - } -} - -// WrapErrHTTPVersionNotSupported returns a new ErrHTTPVersionNotSupported, wrapping an existing error -func WrapErrHTTPVersionNotSupported(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusHTTPVersionNotSupported, - } -} - -// ErrIMUsed is a StatusIMUsed Error -var ErrIMUsed = ErrorWithStatusCode{statusCode: http.StatusIMUsed} - -// NewErrIMUsed returns a new ErrIMUsed with custom error message -func NewErrIMUsed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusIMUsed, - } -} - -// WrapErrIMUsed returns a new ErrIMUsed, wrapping an existing error -func WrapErrIMUsed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusIMUsed, - } -} - -// ErrInsufficientStorage is a StatusInsufficientStorage Error -var ErrInsufficientStorage = ErrorWithStatusCode{statusCode: http.StatusInsufficientStorage} - -// NewErrInsufficientStorage returns a new ErrInsufficientStorage with custom error message -func NewErrInsufficientStorage(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusInsufficientStorage, - } -} - -// WrapErrInsufficientStorage returns a new ErrInsufficientStorage, wrapping an existing error -func WrapErrInsufficientStorage(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusInsufficientStorage, - } -} - -// ErrInternalServerError is a StatusInternalServerError Error -var ErrInternalServerError = ErrorWithStatusCode{statusCode: http.StatusInternalServerError} - -// NewErrInternalServerError returns a new ErrInternalServerError with custom error message -func NewErrInternalServerError(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusInternalServerError, - } -} - -// WrapErrInternalServerError returns a new ErrInternalServerError, wrapping an existing error -func WrapErrInternalServerError(err error) 
error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusInternalServerError, - } -} - -// ErrLengthRequired is a StatusLengthRequired Error -var ErrLengthRequired = ErrorWithStatusCode{statusCode: http.StatusLengthRequired} - -// NewErrLengthRequired returns a new ErrLengthRequired with custom error message -func NewErrLengthRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusLengthRequired, - } -} - -// WrapErrLengthRequired returns a new ErrLengthRequired, wrapping an existing error -func WrapErrLengthRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusLengthRequired, - } -} - -// ErrLocked is a StatusLocked Error -var ErrLocked = ErrorWithStatusCode{statusCode: http.StatusLocked} - -// NewErrLocked returns a new ErrLocked with custom error message -func NewErrLocked(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusLocked, - } -} - -// WrapErrLocked returns a new ErrLocked, wrapping an existing error -func WrapErrLocked(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusLocked, - } -} - -// ErrLoopDetected is a StatusLoopDetected Error -var ErrLoopDetected = ErrorWithStatusCode{statusCode: http.StatusLoopDetected} - -// NewErrLoopDetected returns a new ErrLoopDetected with custom error message -func NewErrLoopDetected(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusLoopDetected, - } -} - -// WrapErrLoopDetected returns a new ErrLoopDetected, wrapping an existing error -func WrapErrLoopDetected(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusLoopDetected, - } -} - -// ErrMethodNotAllowed is a StatusMethodNotAllowed Error -var ErrMethodNotAllowed = ErrorWithStatusCode{statusCode: http.StatusMethodNotAllowed} - -// NewErrMethodNotAllowed returns a new ErrMethodNotAllowed with custom error message -func NewErrMethodNotAllowed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMethodNotAllowed, - } -} - -// WrapErrMethodNotAllowed returns a new ErrMethodNotAllowed, wrapping an existing error -func WrapErrMethodNotAllowed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMethodNotAllowed, - } -} - -// ErrMovedPermanently is a StatusMovedPermanently Error -var ErrMovedPermanently = ErrorWithStatusCode{statusCode: http.StatusMovedPermanently} - -// NewErrMovedPermanently returns a new ErrMovedPermanently with custom error message -func NewErrMovedPermanently(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMovedPermanently, - } -} - -// WrapErrMovedPermanently returns a new ErrMovedPermanently, wrapping an existing error -func WrapErrMovedPermanently(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMovedPermanently, - } -} - -// ErrMultiStatus is a StatusMultiStatus Error -var ErrMultiStatus = ErrorWithStatusCode{statusCode: http.StatusMultiStatus} - -// NewErrMultiStatus returns a new ErrMultiStatus with custom error message -func NewErrMultiStatus(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMultiStatus, - } -} - -// WrapErrMultiStatus returns a new ErrMultiStatus, wrapping an existing error -func 
WrapErrMultiStatus(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMultiStatus, - } -} - -// ErrMultipleChoices is a StatusMultipleChoices Error -var ErrMultipleChoices = ErrorWithStatusCode{statusCode: http.StatusMultipleChoices} - -// NewErrMultipleChoices returns a new ErrMultipleChoices with custom error message -func NewErrMultipleChoices(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMultipleChoices, - } -} - -// WrapErrMultipleChoices returns a new ErrMultipleChoices, wrapping an existing error -func WrapErrMultipleChoices(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMultipleChoices, - } -} - -// ErrNetworkAuthenticationRequired is a StatusNetworkAuthenticationRequired Error -var ErrNetworkAuthenticationRequired = ErrorWithStatusCode{statusCode: http.StatusNetworkAuthenticationRequired} - -// NewErrNetworkAuthenticationRequired returns a new ErrNetworkAuthenticationRequired with custom error message -func NewErrNetworkAuthenticationRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNetworkAuthenticationRequired, - } -} - -// WrapErrNetworkAuthenticationRequired returns a new ErrNetworkAuthenticationRequired, wrapping an existing error -func WrapErrNetworkAuthenticationRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNetworkAuthenticationRequired, - } -} - -// ErrNoContent is a StatusNoContent Error -var ErrNoContent = ErrorWithStatusCode{statusCode: http.StatusNoContent} - -// NewErrNoContent returns a new ErrNoContent with custom error message -func NewErrNoContent(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNoContent, - } -} - -// WrapErrNoContent returns a new ErrNoContent, wrapping an existing error -func WrapErrNoContent(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNoContent, - } -} - -// ErrNonAuthoritativeInfo is a StatusNonAuthoritativeInfo Error -var ErrNonAuthoritativeInfo = ErrorWithStatusCode{statusCode: http.StatusNonAuthoritativeInfo} - -// NewErrNonAuthoritativeInfo returns a new ErrNonAuthoritativeInfo with custom error message -func NewErrNonAuthoritativeInfo(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNonAuthoritativeInfo, - } -} - -// WrapErrNonAuthoritativeInfo returns a new ErrNonAuthoritativeInfo, wrapping an existing error -func WrapErrNonAuthoritativeInfo(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNonAuthoritativeInfo, - } -} - -// ErrNotAcceptable is a StatusNotAcceptable Error -var ErrNotAcceptable = ErrorWithStatusCode{statusCode: http.StatusNotAcceptable} - -// NewErrNotAcceptable returns a new ErrNotAcceptable with custom error message -func NewErrNotAcceptable(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotAcceptable, - } -} - -// WrapErrNotAcceptable returns a new ErrNotAcceptable, wrapping an existing error -func WrapErrNotAcceptable(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotAcceptable, - } -} - -// ErrNotExtended is a StatusNotExtended Error -var ErrNotExtended = ErrorWithStatusCode{statusCode: http.StatusNotExtended} - -// NewErrNotExtended returns a new 
ErrNotExtended with custom error message -func NewErrNotExtended(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotExtended, - } -} - -// WrapErrNotExtended returns a new ErrNotExtended, wrapping an existing error -func WrapErrNotExtended(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotExtended, - } -} - -// ErrNotFound is a StatusNotFound Error -var ErrNotFound = ErrorWithStatusCode{statusCode: http.StatusNotFound} - -// NewErrNotFound returns a new ErrNotFound with custom error message -func NewErrNotFound(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotFound, - } -} - -// WrapErrNotFound returns a new ErrNotFound, wrapping an existing error -func WrapErrNotFound(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotFound, - } -} - -// ErrNotImplemented is a StatusNotImplemented Error -var ErrNotImplemented = ErrorWithStatusCode{statusCode: http.StatusNotImplemented} - -// NewErrNotImplemented returns a new ErrNotImplemented with custom error message -func NewErrNotImplemented(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotImplemented, - } -} - -// WrapErrNotImplemented returns a new ErrNotImplemented, wrapping an existing error -func WrapErrNotImplemented(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotImplemented, - } -} - -// ErrNotModified is a StatusNotModified Error -var ErrNotModified = ErrorWithStatusCode{statusCode: http.StatusNotModified} - -// NewErrNotModified returns a new ErrNotModified with custom error message -func NewErrNotModified(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotModified, - } -} - -// WrapErrNotModified returns a new ErrNotModified, wrapping an existing error -func WrapErrNotModified(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotModified, - } -} - -// ErrPartialContent is a StatusPartialContent Error -var ErrPartialContent = ErrorWithStatusCode{statusCode: http.StatusPartialContent} - -// NewErrPartialContent returns a new ErrPartialContent with custom error message -func NewErrPartialContent(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPartialContent, - } -} - -// WrapErrPartialContent returns a new ErrPartialContent, wrapping an existing error -func WrapErrPartialContent(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPartialContent, - } -} - -// ErrPaymentRequired is a StatusPaymentRequired Error -var ErrPaymentRequired = ErrorWithStatusCode{statusCode: http.StatusPaymentRequired} - -// NewErrPaymentRequired returns a new ErrPaymentRequired with custom error message -func NewErrPaymentRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPaymentRequired, - } -} - -// WrapErrPaymentRequired returns a new ErrPaymentRequired, wrapping an existing error -func WrapErrPaymentRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPaymentRequired, - } -} - -// ErrPermanentRedirect is a StatusPermanentRedirect Error -var ErrPermanentRedirect = ErrorWithStatusCode{statusCode: http.StatusPermanentRedirect} - -// 
NewErrPermanentRedirect returns a new ErrPermanentRedirect with custom error message -func NewErrPermanentRedirect(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPermanentRedirect, - } -} - -// WrapErrPermanentRedirect returns a new ErrPermanentRedirect, wrapping an existing error -func WrapErrPermanentRedirect(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPermanentRedirect, - } -} - -// ErrPreconditionFailed is a StatusPreconditionFailed Error -var ErrPreconditionFailed = ErrorWithStatusCode{statusCode: http.StatusPreconditionFailed} - -// NewErrPreconditionFailed returns a new ErrPreconditionFailed with custom error message -func NewErrPreconditionFailed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPreconditionFailed, - } -} - -// WrapErrPreconditionFailed returns a new ErrPreconditionFailed, wrapping an existing error -func WrapErrPreconditionFailed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPreconditionFailed, - } -} - -// ErrPreconditionRequired is a StatusPreconditionRequired Error -var ErrPreconditionRequired = ErrorWithStatusCode{statusCode: http.StatusPreconditionRequired} - -// NewErrPreconditionRequired returns a new ErrPreconditionRequired with custom error message -func NewErrPreconditionRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPreconditionRequired, - } -} - -// WrapErrPreconditionRequired returns a new ErrPreconditionRequired, wrapping an existing error -func WrapErrPreconditionRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPreconditionRequired, - } -} - -// ErrProcessing is a StatusProcessing Error -var ErrProcessing = ErrorWithStatusCode{statusCode: http.StatusProcessing} - -// NewErrProcessing returns a new ErrProcessing with custom error message -func NewErrProcessing(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusProcessing, - } -} - -// WrapErrProcessing returns a new ErrProcessing, wrapping an existing error -func WrapErrProcessing(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusProcessing, - } -} - -// ErrProxyAuthRequired is a StatusProxyAuthRequired Error -var ErrProxyAuthRequired = ErrorWithStatusCode{statusCode: http.StatusProxyAuthRequired} - -// NewErrProxyAuthRequired returns a new ErrProxyAuthRequired with custom error message -func NewErrProxyAuthRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusProxyAuthRequired, - } -} - -// WrapErrProxyAuthRequired returns a new ErrProxyAuthRequired, wrapping an existing error -func WrapErrProxyAuthRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusProxyAuthRequired, - } -} - -// ErrRequestEntityTooLarge is a StatusRequestEntityTooLarge Error -var ErrRequestEntityTooLarge = ErrorWithStatusCode{statusCode: http.StatusRequestEntityTooLarge} - -// NewErrRequestEntityTooLarge returns a new ErrRequestEntityTooLarge with custom error message -func NewErrRequestEntityTooLarge(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestEntityTooLarge, - } -} - -// WrapErrRequestEntityTooLarge returns a new 
ErrRequestEntityTooLarge, wrapping an existing error -func WrapErrRequestEntityTooLarge(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestEntityTooLarge, - } -} - -// ErrRequestHeaderFieldsTooLarge is a StatusRequestHeaderFieldsTooLarge Error -var ErrRequestHeaderFieldsTooLarge = ErrorWithStatusCode{statusCode: http.StatusRequestHeaderFieldsTooLarge} - -// NewErrRequestHeaderFieldsTooLarge returns a new ErrRequestHeaderFieldsTooLarge with custom error message -func NewErrRequestHeaderFieldsTooLarge(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestHeaderFieldsTooLarge, - } -} - -// WrapErrRequestHeaderFieldsTooLarge returns a new ErrRequestHeaderFieldsTooLarge, wrapping an existing error -func WrapErrRequestHeaderFieldsTooLarge(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestHeaderFieldsTooLarge, - } -} - -// ErrRequestTimeout is a StatusRequestTimeout Error -var ErrRequestTimeout = ErrorWithStatusCode{statusCode: http.StatusRequestTimeout} - -// NewErrRequestTimeout returns a new ErrRequestTimeout with custom error message -func NewErrRequestTimeout(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestTimeout, - } -} - -// WrapErrRequestTimeout returns a new ErrRequestTimeout, wrapping an existing error -func WrapErrRequestTimeout(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestTimeout, - } -} - -// ErrRequestURITooLong is a StatusRequestURITooLong Error -var ErrRequestURITooLong = ErrorWithStatusCode{statusCode: http.StatusRequestURITooLong} - -// NewErrRequestURITooLong returns a new ErrRequestURITooLong with custom error message -func NewErrRequestURITooLong(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestURITooLong, - } -} - -// WrapErrRequestURITooLong returns a new ErrRequestURITooLong, wrapping an existing error -func WrapErrRequestURITooLong(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestURITooLong, - } -} - -// ErrRequestedRangeNotSatisfiable is a StatusRequestedRangeNotSatisfiable Error -var ErrRequestedRangeNotSatisfiable = ErrorWithStatusCode{statusCode: http.StatusRequestedRangeNotSatisfiable} - -// NewErrRequestedRangeNotSatisfiable returns a new ErrRequestedRangeNotSatisfiable with custom error message -func NewErrRequestedRangeNotSatisfiable(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestedRangeNotSatisfiable, - } -} - -// WrapErrRequestedRangeNotSatisfiable returns a new ErrRequestedRangeNotSatisfiable, wrapping an existing error -func WrapErrRequestedRangeNotSatisfiable(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestedRangeNotSatisfiable, - } -} - -// ErrResetContent is a StatusResetContent Error -var ErrResetContent = ErrorWithStatusCode{statusCode: http.StatusResetContent} - -// NewErrResetContent returns a new ErrResetContent with custom error message -func NewErrResetContent(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusResetContent, - } -} - -// WrapErrResetContent returns a new ErrResetContent, wrapping an existing error -func WrapErrResetContent(err error) error { - return &ErrorWithStatusCode{ - 
error: err, - statusCode: http.StatusResetContent, - } -} - -// ErrSeeOther is a StatusSeeOther Error -var ErrSeeOther = ErrorWithStatusCode{statusCode: http.StatusSeeOther} - -// NewErrSeeOther returns a new ErrSeeOther with custom error message -func NewErrSeeOther(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusSeeOther, - } -} - -// WrapErrSeeOther returns a new ErrSeeOther, wrapping an existing error -func WrapErrSeeOther(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusSeeOther, - } -} - -// ErrServiceUnavailable is a StatusServiceUnavailable Error -var ErrServiceUnavailable = ErrorWithStatusCode{statusCode: http.StatusServiceUnavailable} - -// NewErrServiceUnavailable returns a new ErrServiceUnavailable with custom error message -func NewErrServiceUnavailable(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusServiceUnavailable, - } -} - -// WrapErrServiceUnavailable returns a new ErrServiceUnavailable, wrapping an existing error -func WrapErrServiceUnavailable(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusServiceUnavailable, - } -} - -// ErrSwitchingProtocols is a StatusSwitchingProtocols Error -var ErrSwitchingProtocols = ErrorWithStatusCode{statusCode: http.StatusSwitchingProtocols} - -// NewErrSwitchingProtocols returns a new ErrSwitchingProtocols with custom error message -func NewErrSwitchingProtocols(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusSwitchingProtocols, - } -} - -// WrapErrSwitchingProtocols returns a new ErrSwitchingProtocols, wrapping an existing error -func WrapErrSwitchingProtocols(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusSwitchingProtocols, - } -} - -// ErrTeapot is a StatusTeapot Error -var ErrTeapot = ErrorWithStatusCode{statusCode: http.StatusTeapot} - -// NewErrTeapot returns a new ErrTeapot with custom error message -func NewErrTeapot(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusTeapot, - } -} - -// WrapErrTeapot returns a new ErrTeapot, wrapping an existing error -func WrapErrTeapot(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusTeapot, - } -} - -// ErrTemporaryRedirect is a StatusTemporaryRedirect Error -var ErrTemporaryRedirect = ErrorWithStatusCode{statusCode: http.StatusTemporaryRedirect} - -// NewErrTemporaryRedirect returns a new ErrTemporaryRedirect with custom error message -func NewErrTemporaryRedirect(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusTemporaryRedirect, - } -} - -// WrapErrTemporaryRedirect returns a new ErrTemporaryRedirect, wrapping an existing error -func WrapErrTemporaryRedirect(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusTemporaryRedirect, - } -} - -// ErrTooManyRequests is a StatusTooManyRequests Error -var ErrTooManyRequests = ErrorWithStatusCode{statusCode: http.StatusTooManyRequests} - -// NewErrTooManyRequests returns a new ErrTooManyRequests with custom error message -func NewErrTooManyRequests(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusTooManyRequests, - } -} - -// WrapErrTooManyRequests returns a new ErrTooManyRequests, wrapping an 
existing error -func WrapErrTooManyRequests(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusTooManyRequests, - } -} - -// ErrUnauthorized is a StatusUnauthorized Error -var ErrUnauthorized = ErrorWithStatusCode{statusCode: http.StatusUnauthorized} - -// NewErrUnauthorized returns a new ErrUnauthorized with custom error message -func NewErrUnauthorized(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnauthorized, - } -} - -// WrapErrUnauthorized returns a new ErrUnauthorized, wrapping an existing error -func WrapErrUnauthorized(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnauthorized, - } -} - -// ErrUnavailableForLegalReasons is a StatusUnavailableForLegalReasons Error -var ErrUnavailableForLegalReasons = ErrorWithStatusCode{statusCode: http.StatusUnavailableForLegalReasons} - -// NewErrUnavailableForLegalReasons returns a new ErrUnavailableForLegalReasons with custom error message -func NewErrUnavailableForLegalReasons(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnavailableForLegalReasons, - } -} - -// WrapErrUnavailableForLegalReasons returns a new ErrUnavailableForLegalReasons, wrapping an existing error -func WrapErrUnavailableForLegalReasons(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnavailableForLegalReasons, - } -} - -// ErrUnprocessableEntity is a StatusUnprocessableEntity Error -var ErrUnprocessableEntity = ErrorWithStatusCode{statusCode: http.StatusUnprocessableEntity} - -// NewErrUnprocessableEntity returns a new ErrUnprocessableEntity with custom error message -func NewErrUnprocessableEntity(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnprocessableEntity, - } -} - -// WrapErrUnprocessableEntity returns a new ErrUnprocessableEntity, wrapping an existing error -func WrapErrUnprocessableEntity(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnprocessableEntity, - } -} - -// ErrUnsupportedMediaType is a StatusUnsupportedMediaType Error -var ErrUnsupportedMediaType = ErrorWithStatusCode{statusCode: http.StatusUnsupportedMediaType} - -// NewErrUnsupportedMediaType returns a new ErrUnsupportedMediaType with custom error message -func NewErrUnsupportedMediaType(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnsupportedMediaType, - } -} - -// WrapErrUnsupportedMediaType returns a new ErrUnsupportedMediaType, wrapping an existing error -func WrapErrUnsupportedMediaType(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnsupportedMediaType, - } -} - -// ErrUpgradeRequired is a StatusUpgradeRequired Error -var ErrUpgradeRequired = ErrorWithStatusCode{statusCode: http.StatusUpgradeRequired} - -// NewErrUpgradeRequired returns a new ErrUpgradeRequired with custom error message -func NewErrUpgradeRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUpgradeRequired, - } -} - -// WrapErrUpgradeRequired returns a new ErrUpgradeRequired, wrapping an existing error -func WrapErrUpgradeRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUpgradeRequired, - } -} - -// ErrUseProxy is a StatusUseProxy Error -var ErrUseProxy = 
ErrorWithStatusCode{statusCode: http.StatusUseProxy} - -// NewErrUseProxy returns a new ErrUseProxy with custom error message -func NewErrUseProxy(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUseProxy, - } -} - -// WrapErrUseProxy returns a new ErrUseProxy, wrapping an existing error -func WrapErrUseProxy(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUseProxy, - } -} - -// ErrVariantAlsoNegotiates is a StatusVariantAlsoNegotiates Error -var ErrVariantAlsoNegotiates = ErrorWithStatusCode{statusCode: http.StatusVariantAlsoNegotiates} - -// NewErrVariantAlsoNegotiates returns a new ErrVariantAlsoNegotiates with custom error message -func NewErrVariantAlsoNegotiates(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusVariantAlsoNegotiates, - } -} - -// WrapErrVariantAlsoNegotiates returns a new ErrVariantAlsoNegotiates, wrapping an existing error -func WrapErrVariantAlsoNegotiates(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusVariantAlsoNegotiates, - } -} - -var defaultMessages = map[int]string{ - http.StatusAccepted: "Accepted", - http.StatusAlreadyReported: "Already Reported", - http.StatusBadGateway: "Bad Gateway", - http.StatusBadRequest: "Bad Request", - http.StatusConflict: "Conflict", - http.StatusContinue: "Continue", - http.StatusCreated: "Created", - http.StatusExpectationFailed: "Expectation Failed", - http.StatusFailedDependency: "Failed Dependency", - http.StatusForbidden: "Forbidden", - http.StatusFound: "Found", - http.StatusGatewayTimeout: "Gateway Timeout", - http.StatusGone: "Gone", - http.StatusHTTPVersionNotSupported: "HTTPVersion Not Supported", - http.StatusIMUsed: "IMUsed", - http.StatusInsufficientStorage: "Insufficient Storage", - http.StatusInternalServerError: "Internal Server Error", - http.StatusLengthRequired: "Length Required", - http.StatusLocked: "Locked", - http.StatusLoopDetected: "Loop Detected", - http.StatusMethodNotAllowed: "Method Not Allowed", - http.StatusMovedPermanently: "Moved Permanently", - http.StatusMultiStatus: "Multi Status", - http.StatusMultipleChoices: "Multiple Choices", - http.StatusNetworkAuthenticationRequired: "Network Authentication Required", - http.StatusNoContent: "No Content", - http.StatusNonAuthoritativeInfo: "Non Authoritative Info", - http.StatusNotAcceptable: "Not Acceptable", - http.StatusNotExtended: "Not Extended", - http.StatusNotFound: "Not Found", - http.StatusNotImplemented: "Not Implemented", - http.StatusNotModified: "Not Modified", - http.StatusPartialContent: "Partial Content", - http.StatusPaymentRequired: "Payment Required", - http.StatusPermanentRedirect: "Permanent Redirect", - http.StatusPreconditionFailed: "Precondition Failed", - http.StatusPreconditionRequired: "Precondition Required", - http.StatusProcessing: "Processing", - http.StatusProxyAuthRequired: "Proxy Auth Required", - http.StatusRequestEntityTooLarge: "Request Entity Too Large", - http.StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", - http.StatusRequestTimeout: "Request Timeout", - http.StatusRequestURITooLong: "Request URIToo Long", - http.StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", - http.StatusResetContent: "Reset Content", - http.StatusSeeOther: "See Other", - http.StatusServiceUnavailable: "Service Unavailable", - http.StatusSwitchingProtocols: "Switching Protocols", - http.StatusTeapot: 
"Teapot", - http.StatusTemporaryRedirect: "Temporary Redirect", - http.StatusTooManyRequests: "Too Many Requests", - http.StatusUnauthorized: "Unauthorized", - http.StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons", - http.StatusUnprocessableEntity: "Unprocessable Entity", - http.StatusUnsupportedMediaType: "Unsupported Media Type", - http.StatusUpgradeRequired: "Upgrade Required", - http.StatusUseProxy: "Use Proxy", - http.StatusVariantAlsoNegotiates: "Variant Also Negotiates", -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go deleted file mode 100644 index 8dd51720..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -import ( - "errors" - "strconv" - "time" -) - -// ErrUnsupported is returned when an unsupported interface on the event is called -var ErrUnsupported = errors.New("Event does not support this interface") - -// ErrTypeConversion is returned when a type conversion for headers / fields fails -var ErrTypeConversion = errors.New("Cannot convert to this type") - -// TriggerInfoProvider provides information about the trigger in which this event originated -type TriggerInfoProvider interface { - - // get the class of source (sync, async, etc) - GetClass() string - - // get specific kind of source (http, rabbit mq, etc) - GetKind() string -} - -// Event allows access to the concrete event -type Event interface { - - // GetID returns the ID of the event - GetID() ID - - // SetID sets the ID of the event - SetID(ID) - - // SetTriggerInfoProvider sets the information about the trigger who triggered this event - SetTriggerInfoProvider(TriggerInfoProvider) - - // GetTriggerInfo retruns a trigger info provider - GetTriggerInfo() TriggerInfoProvider - - // GetContentType returns the content type of the body - GetContentType() string - - // GetBody returns the body of the event - GetBody() []byte - - // GetBodyObject returns the body of the event as an object - GetBodyObject() interface{} - - // GetHeader returns the header by name as an interface{} - GetHeader(string) interface{} - - // GetHeaderByteSlice returns the header by name as a byte slice - GetHeaderByteSlice(string) []byte - - // GetHeaderString returns the header by name as a string - GetHeaderString(string) string - - // GetHeaderInt returns the field by name as an integer - GetHeaderInt(string) (int, error) - - // GetHeaders loads all headers into a map of string / interface{} - GetHeaders() map[string]interface{} - - // GetField returns the field by name as an interface{} - GetField(string) interface{} - - // GetFieldByteSlice returns the field by name as a byte slice - GetFieldByteSlice(string) []byte - - // GetFieldString returns the field by name as a string - 
GetFieldString(string) string - - // GetFieldInt returns the field by name as an integer - GetFieldInt(string) (int, error) - - // GetFields loads all fields into a map of string / interface{} - GetFields() map[string]interface{} - - // GetTimestamp returns when the event originated - GetTimestamp() time.Time - - // GetPath returns the path of the event - GetPath() string - - // GetURL returns the URL of the event - GetURL() string - - // GetPath returns the method of the event, if applicable - GetMethod() string - - // GetShardID returns the ID of the shard from which this event arrived, if applicable - GetShardID() int - - // GetTotalNumShards returns the total number of shards, if applicable - GetTotalNumShards() int - - // GetType returns the type of event - GetType() string - - // GetTypeVersion returns the version of the type - GetTypeVersion() string - - // GetVersion returns the version of the event - GetVersion() string -} - -// AbstractEvent provides a base implemention of an event -type AbstractEvent struct { - triggerInfoProvider TriggerInfoProvider - id ID - emptyByteArray []byte - emptyHeaders map[string]interface{} - emptyTime time.Time -} - -// SetTriggerInfoProvider sets the information about the trigger who triggered this event -func (ae *AbstractEvent) SetTriggerInfoProvider(triggerInfoProvider TriggerInfoProvider) { - ae.triggerInfoProvider = triggerInfoProvider -} - -// GetTriggerInfo retruns a trigger info provider -func (ae *AbstractEvent) GetTriggerInfo() TriggerInfoProvider { - return ae.triggerInfoProvider -} - -// GetID returns the ID of the event -func (ae *AbstractEvent) GetID() ID { - return ae.id -} - -// SetID sets the ID of the event -func (ae *AbstractEvent) SetID(id ID) { - ae.id = id -} - -// GetContentType returns the content type of the body -func (ae *AbstractEvent) GetContentType() string { - return "" -} - -// GetBody returns the body of the event -func (ae *AbstractEvent) GetBody() []byte { - return ae.emptyByteArray -} - -// GetBodyObject returns the body of the event as an object -func (ae *AbstractEvent) GetBodyObject() interface{} { - return ae.GetBody() -} - -// GetHeader returns the header by name as an interface{} -func (ae *AbstractEvent) GetHeader(key string) interface{} { - return nil -} - -// GetHeaderByteSlice returns the header by name as a byte slice -func (ae *AbstractEvent) GetHeaderByteSlice(key string) []byte { - return ae.emptyByteArray -} - -// GetHeaderString returns the header by name as a string -func (ae *AbstractEvent) GetHeaderString(key string) string { - return string(ae.GetHeaderByteSlice(key)) -} - -// GetHeaderInt returns the field by name as an integer -func (ae *AbstractEvent) GetHeaderInt(key string) (int, error) { - - // try to get header as an interface - headerAsInterface := ae.GetHeader(key) - - // if the header value is not an integer - switch typedHeader := headerAsInterface.(type) { - case int: - return typedHeader, nil - case string: - return strconv.Atoi(typedHeader) - case []byte: - return strconv.Atoi(string(typedHeader)) - - default: - return 0, ErrTypeConversion - } -} - -// GetHeaders loads all headers into a map of string / interface{} -func (ae *AbstractEvent) GetHeaders() map[string]interface{} { - return ae.emptyHeaders -} - -// GetTimestamp returns when the event originated -func (ae *AbstractEvent) GetTimestamp() time.Time { - return ae.emptyTime -} - -// GetPath returns the path of the event -func (ae *AbstractEvent) GetPath() string { - return "" -} - -// GetURL returns the URL of the event 
-func (ae *AbstractEvent) GetURL() string { - return "" -} - -// GetPath returns the method of the event, if applicable -func (ae *AbstractEvent) GetMethod() string { - return "" -} - -// GetField returns the field by name as an interface{} -func (ae *AbstractEvent) GetField(key string) interface{} { - return nil -} - -// GetFieldByteSlice returns the field by name as a byte slice -func (ae *AbstractEvent) GetFieldByteSlice(key string) []byte { - return nil -} - -// GetFieldString returns the field by name as a string -func (ae *AbstractEvent) GetFieldString(key string) string { - return "" -} - -// GetFieldInt returns the field by name as an integer -func (ae *AbstractEvent) GetFieldInt(key string) (int, error) { - return 0, ErrUnsupported -} - -// GetFields loads all fields into a map of string / interface{} -func (ae *AbstractEvent) GetFields() map[string]interface{} { - return nil -} - -// GetShardID returns the ID of the shard from which this event arrived, if applicable -func (ae *AbstractEvent) GetShardID() int { - return -1 -} - -// GetTotalNumShards returns the total number of shards, if applicable -func (ae *AbstractEvent) GetTotalNumShards() int { - return 0 -} - -// GetType returns the type of event -func (ae *AbstractEvent) GetType() string { - return "" -} - -// GetTypeVersion returns the version of the type -func (ae *AbstractEvent) GetTypeVersion() string { - return "" -} - -// GetVersion returns the version of the event -func (ae *AbstractEvent) GetVersion() string { - return "" -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go deleted file mode 100644 index 017f111b..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go +++ /dev/null @@ -1,183 +0,0 @@ -// +build ignore - -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Generate errors.go from constants in net/http -package main - -import ( - "flag" - "go/importer" - "log" - "os" - "regexp" - "sort" - "strings" - "text/template" -) - -const ( - statusPrefix = "Status" - - codeTemplateText = `// Automatically generated by gen_errors.go - -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nuclio - -import ( - "errors" - "fmt" - "net/http" -) - -// WithStatusCode is an error with status code -type WithStatusCode interface { - StatusCode() int -} - -// ErrorWithStatusCode implements both error and WithStatusCode -type ErrorWithStatusCode struct { - error - statusCode int -} - -// GetError returns the underlying error -func (e *ErrorWithStatusCode) GetError() error { - return e.error -} - -// StatusCode returns the status code -func (e *ErrorWithStatusCode) StatusCode() int { - return e.statusCode -} - -// Error returns the error message -func (e ErrorWithStatusCode) Error() string { - if e.error != nil { - return e.error.Error() - } - - message, ok := defaultMessages[e.statusCode] - if !ok { - message = fmt.Sprintf("Unknown error: %d", e.statusCode) - } - - return message -} - -{{range .}} -// {{. | StatusToError}} is a {{.}} Error -var {{. | StatusToError}} = ErrorWithStatusCode{statusCode: http.{{.}}} - -// New{{. | StatusToError}} returns a new {{. | StatusToError}} with custom error message -func New{{. | StatusToError}}(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.{{.}}, - } -} - -// Wrap{{. | StatusToError}} returns a new {{. | StatusToError}}, wrapping an existing error -func Wrap{{. | StatusToError}}(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.{{.}}, - } -} -{{end}} - -var defaultMessages = map[int]string{ -{{- range .}} - http.{{.}}: "{{. | HumanStatus}}", -{{- end}} -} -` -) - -var ( - // Add space between camel case - humanRe = regexp.MustCompile("([a-z])([A-Z])") -) - -// StatusToError convert http status name to error name -// (e.g. "StatusAccepted" -> "ErrAccepted") -func StatusToError(status string) string { - return "Err" + status[len(statusPrefix):] -} - -// HumanStatus returns human formed status -// (e.g. "StatusTooManyRequests" -> "Too Many Requests") -func HumanStatus(status string) string { - return humanRe.ReplaceAllString(status[len(statusPrefix):], "$1 $2") -} - -func main() { - flag.Parse() // Support -h, --help - - pkg, err := importer.Default().Import("net/http") - if err != nil { - log.Fatal(err) - } - - var names []string - for _, name := range pkg.Scope().Names() { - if !strings.HasPrefix(name, statusPrefix) || name == "StatusOK" { - continue - } - - obj := pkg.Scope().Lookup(name) - if obj.Type().String() != "untyped int" { - continue - } - - names = append(names, name) - } - sort.Strings(names) - - funcMap := template.FuncMap{ - "StatusToError": StatusToError, - "HumanStatus": HumanStatus, - } - codeTemplate, err := template.New("").Funcs(funcMap).Parse(codeTemplateText) - if err != nil { - log.Fatal(err) - } - - out, err := os.Create("errors.go") - if err != nil { - log.Fatal(err) - } - - if err := codeTemplate.Execute(out, names); err != nil { - log.Fatal(err) - } -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go deleted file mode 100644 index 041ce1b8..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -// Response can be returned from functions, allowing the user to specify various fields -type Response struct { - StatusCode int - ContentType string - Headers map[string]interface{} - Body []byte -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go deleted file mode 100644 index fdc1a34c..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -// ID is event ID -type ID string diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
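Taken together, the deleted nuclio-sdk-go pieces above (the generated status-code errors, the Response struct, and the handler signature exercised by nuclio-test-go below) composed in a predictable way. A minimal, hypothetical sketch; the handler name, messages, and header key are illustrative, not taken from this patch:

```golang
package main

import (
	"github.com/nuclio/nuclio-sdk-go"
)

// Handler demonstrates the two return paths: an ErrorWithStatusCode for
// failures (mapped to an HTTP status) and a nuclio.Response for full
// control over the successful reply.
func Handler(context *nuclio.Context, event nuclio.Event) (interface{}, error) {
	body := event.GetBody()
	if len(body) == 0 {
		// generated helper from errors.go, carrying http.StatusBadRequest
		return nil, nuclio.NewErrBadRequest("request body must not be empty")
	}

	context.Logger.DebugWith("Handling event", "bodySize", len(body))

	return nuclio.Response{
		StatusCode:  200,
		ContentType: "text/plain",
		Headers:     map[string]interface{}{"X-Example": "demo"}, // illustrative header
		Body:        []byte("ok"),
	}, nil
}
```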
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md deleted file mode 100644 index 7fe41427..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# nuclio function wrapper - -Test nuclio functions locally or as part of Go testing - -# Usage, Golang unit testing framework: - -```golang -package main - -import ( - "testing" - "github.com/nuclio/nuclio-test-go" -) - -func TestName(t *testing.T) { - // data binding for V3IO data containers, optional (use nil instead of &data) - data := nutest.DataBind{Name:"db0", Url:"", Container:"x"} - - // Create TestContext and specify the function name, verbose, data - tc, err := nutest.NewTestContext(MyHandler, true, &data) - if err != nil { - t.Fail() - } - - // Optional, initialize context must have a function in the form: - // InitContext(context *nuclio.Context) error - err = tc.InitContext(InitContext) - if err != nil { - t.Fail() - } - - // Create a new test event - testEvent := nutest.TestEvent{ - Path: "/some/path", - Body: []byte("1234"), - Headers:map[string]interface{}{"first": "string"}, - } - - // invoke the tested function with the new event and log its output - resp, err := tc.Invoke(&testEvent) - tc.Logger.InfoWith("Run complete", "resp", resp, "err", err) -} -``` - -# Usage, called from another program: - -```golang -package main - -import ( - "github.com/nuclio/nuclio-test-go" -) - -func main() { - // data binding for V3IO data containers, optional (use nil instead of &data) - data := nutest.DataBind{Name:"db0", Url:"", Container:"x"} - - // Create TestContext and specify the function name, verbose, data - tc, err := nutest.NewTestContext(MyHandler, true, &data) - if err != nil { - panic(err) - } - - // Create a new test event - testEvent := nutest.TestEvent{ - Path: "/some/path", - Body: []byte("1234"), - Headers:map[string]interface{}{"first": "something"}, - } - - // invoke the tested function with the new event and log its output - resp, err := tc.Invoke(&testEvent) - tc.Logger.InfoWith("Run complete", "resp", resp, "err", err) -} -``` diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go deleted file mode 100644 index 5d9ed9d9..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nutest - -import ( - "github.com/nuclio/nuclio-sdk-go" - "github.com/pkg/errors" - "time" -) - -type TestEvent struct { - nuclio.AbstractEvent - - Body []byte - ContentType string - id nuclio.ID - emptyByteArray []byte - Headers map[string]interface{} - Path string - URL string - Method string - Time time.Time -} - -var ErrUnsupported = errors.New("Event does not support this interface") - - -func (te *TestEvent) GetContentType() string { - return te.ContentType -} - -func (te *TestEvent) GetBody() []byte { - return te.Body -} - -func (te *TestEvent) GetPath() string { - return te.Path -} - -func (te *TestEvent) GetURL() string { - return te.URL -} - -func (te *TestEvent) GetMethod() string { - return te.Method -} - -func (te *TestEvent) GetHeaders() map[string]interface{} { - return te.Headers -} - -func (te *TestEvent) GetHeader(key string) interface{} { - return te.Headers[key] -} - -func (te *TestEvent) GetHeaderByteSlice(key string) []byte { - value, found := te.Headers[key] - if !found { - return nil - } - - switch typedValue := value.(type) { - case string: - return []byte(typedValue) - case []byte: - return typedValue - default: - return nil - } -} - -func (te *TestEvent) GetHeaderString(key string) string { - return string(te.GetHeaderByteSlice(key)) -} - -func (te *TestEvent) GetTimestamp() time.Time { - return te.Time -} - diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go deleted file mode 100644 index 1cd62731..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nutest - -import ( - "github.com/nuclio/zap" - "github.com/nuclio/nuclio-sdk-go" - "github.com/v3io/v3io-go-http" - "github.com/pkg/errors" - "github.com/nuclio/logger" -) - -func NewTestContext(function func(context *nuclio.Context, event nuclio.Event)(interface {}, error), - verbose bool, data *DataBind) (*TestContext, error) { - newTest := TestContext{Data:data} - if verbose { - newTest.LogLevel = nucliozap.DebugLevel - } else { - newTest.LogLevel = nucliozap.WarnLevel - } - - logger, err := nucliozap.NewNuclioZapCmd("emulator", newTest.LogLevel) - if err != nil { - return nil, errors.Wrap(err, "Failed to create logger") - } - - newTest.Logger = logger - - db := map[string]nuclio.DataBinding{} - if data != nil { - container, err := createContainer(logger, data) - if err != nil { - logger.ErrorWith("Failed to createContainer", "err", err) - return nil, errors.Wrap(err, "Failed to createContainer") - } - - if data.Name == "" { - data.Name = "db0" - } - db[data.Name] = container - } - - newTest.context = nuclio.Context{Logger:logger, DataBinding:db} - newTest.function = function - - - return &newTest, nil -} - -type TestContext struct { - LogLevel nucliozap.Level - Logger logger.Logger - Data *DataBind - context nuclio.Context - function func(context *nuclio.Context, event nuclio.Event)(interface {}, error) -} - -func (tc *TestContext) InitContext(function func(context *nuclio.Context) error) error { - return function(&tc.context) -} - -func (tc *TestContext) Invoke(event nuclio.Event) (interface{}, error) { - - body, err := tc.function(&tc.context, event) - if err != nil { - tc.Logger.ErrorWith("Function execution failed", "err", err) - return body, err - } - tc.Logger.InfoWith("Function completed","output",body) - - return body, err -} - -func createContainer(logger logger.Logger, db *DataBind) (*v3io.Container, error) { - // create context - context, err := v3io.NewContext(logger, db.Url , 8) - if err != nil { - return nil, errors.Wrap(err, "Failed to create client") - } - - // create session - session, err := context.NewSession(db.User, db.Password, "v3test") - if err != nil { - return nil, errors.Wrap(err, "Failed to create session") - } - - // create the container - container, err := session.NewContainer(db.Container) - if err != nil { - return nil, errors.Wrap(err, "Failed to create container") - } - - return container, nil -} - -type DataBind struct { - Name string - Url string - Container string - User string - Password string -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md deleted file mode 100644 index fe510b1a..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md +++ /dev/null @@ -1 +0,0 @@ -# zap diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go deleted file mode 100644 index e9d41e3e..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nucliozap - -import ( - "bytes" - "encoding/json" - "fmt" - "time" - - "github.com/pkg/errors" -) - -var ErrBufferPoolAllocationTimeout = errors.New("Timed out waiting for buffer logger") - -// a logger who outputs the records to a buffer -type BufferLogger struct { - encoding string - Logger *NuclioZap - Buffer *bytes.Buffer -} - -func NewBufferLogger(name string, encoding string, level Level) (*BufferLogger, error) { - writer := &bytes.Buffer{} - - // create a logger that is able to capture the output into a buffer. if a request arrives - // and the user wishes to capture the log, this will be used as the logger instead of the default - // logger - newLogger, err := NewNuclioZap(name, - encoding, - nil, - writer, - writer, - level) - - if err != nil { - return nil, errors.Wrap(err, "Failed to create buffer logger") - } - - return &BufferLogger{ - Logger: newLogger, - Buffer: writer, - encoding: encoding, - }, nil -} - -func (bl *BufferLogger) GetJSONString() (string, error) { - if bl.encoding != "json" { - return "", fmt.Errorf("Can only return JSON when encoding is JSON, not %s", bl.encoding) - } - - jsonBody := bl.Buffer.Bytes() - if len(jsonBody) != 0 { - - // remove last comma - jsonBody = jsonBody[:len(jsonBody)-1] - } - - return "[" + string(jsonBody) + "]", nil -} - -func (bl *BufferLogger) GetLogEntries() ([]map[string]interface{}, error) { - jsonBody, err := bl.GetJSONString() - if err != nil { - return nil, errors.Wrap(err, "Failed to get JSON string") - } - - unmarshalledJSONBody := []map[string]interface{}{} - - err = json.Unmarshal([]byte(jsonBody), &unmarshalledJSONBody) - if err != nil { - return nil, errors.Wrap(err, "Failed to unmarshal JSON body") - } - - return unmarshalledJSONBody, nil -} - -// a pool for buffer loggers -type BufferLoggerPool struct { - bufferLoggerChan chan *BufferLogger - defaultAllocateTimeout time.Duration -} - -// a pool of buffer loggers -func NewBufferLoggerPool(numBufferLoggers int, - name string, - encoding string, - level Level) (*BufferLoggerPool, error) { - - // create a channel for the buffer loggers - bufferLoggersChan := make(chan *BufferLogger, numBufferLoggers) - - // create buffer loggers - for bufferLoggerIdx := 0; bufferLoggerIdx < numBufferLoggers; bufferLoggerIdx++ { - newBufferLogger, err := NewBufferLogger(name, encoding, level) - if err != nil { - return nil, errors.Wrap(err, "Failed to create buffer logger") - } - - // shove to channel - bufferLoggersChan <- newBufferLogger - } - - return &BufferLoggerPool{ - bufferLoggerChan: bufferLoggersChan, - defaultAllocateTimeout: 10 * time.Second, - }, nil -} - -func (blp *BufferLoggerPool) Allocate(timeout *time.Duration) (*BufferLogger, error) { - if timeout == nil { - timeout = &blp.defaultAllocateTimeout - } - - select { - case bufferLogger := <-blp.bufferLoggerChan: - - // clear the buffer - bufferLogger.Buffer.Reset() - - return bufferLogger, nil - case <-time.After(*timeout): - return nil, ErrBufferPoolAllocationTimeout - } -} - -func (blp *BufferLoggerPool) Release(bufferLogger *BufferLogger) { - blp.bufferLoggerChan <- bufferLogger -} diff --git 
a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go deleted file mode 100644 index 30f172bc..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go +++ /dev/null @@ -1,458 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nucliozap - -import ( - "context" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/mgutz/ansi" - "github.com/nuclio/logger" - "github.com/pavius/zap" - "github.com/pavius/zap/zapcore" -) - -type EncoderConfigJSON struct { - LineEnding string - VarGroupName string - TimeFieldName string - TimeFieldEncoding string -} - -type EncoderConfigConsole struct { -} - -type EncoderConfig struct { - JSON EncoderConfigJSON - Console EncoderConfigConsole -} - -func NewEncoderConfig() *EncoderConfig { - return &EncoderConfig{ - JSON: EncoderConfigJSON{ - LineEnding: ",", - TimeFieldName: "time", - TimeFieldEncoding: "epoch-millis", - }, - } -} - -// Level is logging levels -type Level int8 - -// Predefined logging levels -const ( - DebugLevel Level = Level(zapcore.DebugLevel) - InfoLevel Level = Level(zapcore.InfoLevel) - WarnLevel Level = Level(zapcore.WarnLevel) - ErrorLevel Level = Level(zapcore.ErrorLevel) - DPanicLevel Level = Level(zapcore.DPanicLevel) - PanicLevel Level = Level(zapcore.PanicLevel) - FatalLevel Level = Level(zapcore.FatalLevel) -) - -type writerWrapper struct { - io.Writer -} - -func (w writerWrapper) Sync() error { - return nil -} - -// NuclioZap is a concrete implementation of the nuclio logger interface, using zap -type NuclioZap struct { - *zap.SugaredLogger - atomicLevel zap.AtomicLevel - coloredLevelDebug string - coloredLevelInfo string - coloredLevelWarn string - coloredLevelError string - colorLoggerName func(string) string - customEncoderConfig *EncoderConfig - encoding string -} - -// NewNuclioZap create a configurable logger -func NewNuclioZap(name string, - encoding string, - customEncoderConfig *EncoderConfig, - sink io.Writer, - errSink io.Writer, - level Level) (*NuclioZap, error) { - newNuclioZap := &NuclioZap{ - atomicLevel: zap.NewAtomicLevelAt(zapcore.Level(level)), - customEncoderConfig: customEncoderConfig, - encoding: encoding, - } - - if customEncoderConfig == nil { - customEncoderConfig = NewEncoderConfig() - } - - // create an encoder configuration - encoderConfig := newNuclioZap.getEncoderConfig(encoding, customEncoderConfig) - - // create a sane configuration - config := zap.Config{ - Level: newNuclioZap.atomicLevel, - Development: true, - Encoding: encoding, - EncoderConfig: *encoderConfig, - OutputWriters: []zapcore.WriteSyncer{writerWrapper{sink}}, - ErrorOutputWriters: []zapcore.WriteSyncer{writerWrapper{errSink}}, - DisableStacktrace: true, - } - - newZapLogger, err := config.Build() - if err != nil { - return nil, err - } - - newNuclioZap.SugaredLogger = newZapLogger.Sugar().Named(name) - - // initialize 
coloring by level - newNuclioZap.initializeColors() - - return newNuclioZap, nil -} - -// We use this instead of testing.Verbose since we don't want to get testing flags in our code -func isVerboseTesting() bool { - for _, arg := range os.Args { - if arg == "-test.v=true" || arg == "-test.v" { - return true - } - } - return false -} - -// NewNuclioZapTest creates a logger pre-configured for tests -func NewNuclioZapTest(name string) (*NuclioZap, error) { - var loggerLevel Level - - if isVerboseTesting() { - loggerLevel = DebugLevel - } else { - loggerLevel = InfoLevel - } - - return NewNuclioZapCmd(name, loggerLevel) -} - -// NewNuclioZapCmd creates a logger pre-configured for commands -func NewNuclioZapCmd(name string, level Level) (*NuclioZap, error) { - return NewNuclioZap(name, "console", nil, os.Stdout, os.Stdout, level) -} - -// GetLevelByName returns logging level by name -func GetLevelByName(levelName string) Level { - switch levelName { - case "info": - return Level(zapcore.InfoLevel) - case "warn": - return Level(zapcore.WarnLevel) - case "error": - return Level(zapcore.ErrorLevel) - case "dpanic": - return Level(zapcore.DPanicLevel) - case "panic": - return Level(zapcore.PanicLevel) - case "fatal": - return Level(zapcore.FatalLevel) - default: - return Level(zapcore.DebugLevel) - } -} - -// SetLevel sets the logging level -func (nz *NuclioZap) SetLevel(level Level) { - nz.atomicLevel.SetLevel(zapcore.Level(level)) -} - -// GetLevel returns the current logging level -func (nz *NuclioZap) GetLevel() Level { - return Level(nz.atomicLevel.Level()) -} - -// Error emits an error level log -func (nz *NuclioZap) Error(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Errorf(formatString, vars...) - } else { - nz.SugaredLogger.Error(format) - } -} - -// ErrorCtx emits an unstructured error level log with context -func (nz *NuclioZap) ErrorCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Errorw(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// ErrorWith emits error level log with arguments -func (nz *NuclioZap) ErrorWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Errorw(format.(string), vars...) -} - -// ErrorWithCtx emits an error level log with arguments and context -func (nz *NuclioZap) ErrorWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Errorw(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) -} - -// Warn emits warn level log -func (nz *NuclioZap) Warn(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Warnf(formatString, vars...) - } else { - nz.SugaredLogger.Warn(format) - } -} - -// WarnCtx emits an unstructured warn level log with context -func (nz *NuclioZap) WarnCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Warnw(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// WarnWith emits warn level log with arguments -func (nz *NuclioZap) WarnWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Warnw(format.(string), vars...) -} - -// WarnWithCtx emits a warn level log with arguments and context -func (nz *NuclioZap) WarnWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Warnw(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) 
-} - -// Info emits info level log -func (nz *NuclioZap) Info(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Infof(formatString, vars...) - } else { - nz.SugaredLogger.Info(format) - } -} - -// InfoCtx emits an unstructured info level log with context -func (nz *NuclioZap) InfoCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Infow(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// InfoWith emits info level log with arguments -func (nz *NuclioZap) InfoWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Infow(format.(string), nz.prepareVars(vars)...) -} - -// InfoWithCtx emits an info level log with arguments and context -func (nz *NuclioZap) InfoWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Infow(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) -} - -// Debug emits debug level log -func (nz *NuclioZap) Debug(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Debugf(formatString, vars...) - } else { - nz.SugaredLogger.Debug(format) - } -} - -// DebugCtx emits an unstructured debug log with context -func (nz *NuclioZap) DebugCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Debugw(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// DebugWith emits debug level log with arguments -func (nz *NuclioZap) DebugWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Debugw(format.(string), nz.prepareVars(vars)...) -} - -// DebugWithCtx emits a debug level log with arguments and context -func (nz *NuclioZap) DebugWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Debugw(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) 
-} - -// Flush flushes the log -func (nz *NuclioZap) Flush() { - nz.Sync() -} - -// GetChild returns a named child logger -func (nz *NuclioZap) GetChild(name string) logger.Logger { - return &NuclioZap{ - SugaredLogger: nz.Named(name), - encoding: nz.encoding, - customEncoderConfig: nz.customEncoderConfig, - } -} - -func (nz *NuclioZap) encodeLoggerName(loggerName string, enc zapcore.PrimitiveArrayEncoder) { - const maxLoggerNameLength = 25 - actualLoggerNameLength := len(loggerName) - var encodedLoggerName string - - if actualLoggerNameLength >= maxLoggerNameLength { - encodedLoggerName = loggerName[actualLoggerNameLength-maxLoggerNameLength:] - - } else { - encodedLoggerName = strings.Repeat(" ", maxLoggerNameLength-actualLoggerNameLength) + loggerName - } - - // append the padded or truncated name - enc.AppendString(nz.colorLoggerName(encodedLoggerName)) -} - -func (nz *NuclioZap) encodeStdoutLevel(level zapcore.Level, enc zapcore.PrimitiveArrayEncoder) { - switch level { - case zapcore.InfoLevel: - enc.AppendString(nz.coloredLevelInfo) - return - case zapcore.WarnLevel: - enc.AppendString(nz.coloredLevelWarn) - return - case zapcore.ErrorLevel: - enc.AppendString(nz.coloredLevelError) - return - } - - enc.AppendString(nz.coloredLevelDebug) -} - -func (nz *NuclioZap) encodeStdoutTime(t time.Time, enc zapcore.PrimitiveArrayEncoder) { - enc.AppendString(t.Format("06.01.02 15:04:05.000")) -} - -func (nz *NuclioZap) initializeColors() { - nz.coloredLevelDebug = ansi.Color("(D)", "green") - nz.coloredLevelInfo = ansi.Color("(I)", "blue") - nz.coloredLevelWarn = ansi.Color("(W)", "yellow") - nz.coloredLevelError = ansi.Color("(E)", "red") - - nz.colorLoggerName = ansi.ColorFunc("white") -} - -func (nz *NuclioZap) getEncoderConfig(encoding string, encoderConfig *EncoderConfig) *zapcore.EncoderConfig { - if encoding == "console" { - return &zapcore.EncoderConfig{ - TimeKey: "time", - LevelKey: "level", - NameKey: "name", - CallerKey: "", - MessageKey: "message", - StacktraceKey: "stack", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: nz.encodeStdoutLevel, - EncodeTime: nz.encodeStdoutTime, - EncodeDuration: zapcore.StringDurationEncoder, - EncodeCaller: func(zapcore.EntryCaller, zapcore.PrimitiveArrayEncoder) {}, - EncodeLoggerName: nz.encodeLoggerName, - } - } - - var timeEncoder zapcore.TimeEncoder - switch encoderConfig.JSON.TimeFieldEncoding { - case "iso8601": - timeEncoder = zapcore.ISO8601TimeEncoder - default: - timeEncoder = zapcore.EpochMillisTimeEncoder - } - - return &zapcore.EncoderConfig{ - TimeKey: encoderConfig.JSON.TimeFieldName, - LevelKey: "level", - NameKey: "name", - CallerKey: "", - MessageKey: "message", - StacktraceKey: "stack", - LineEnding: encoderConfig.JSON.LineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: timeEncoder, - EncodeDuration: zapcore.SecondsDurationEncoder, - EncodeCaller: func(zapcore.EntryCaller, zapcore.PrimitiveArrayEncoder) {}, - EncodeLoggerName: zapcore.FullLoggerNameEncoder, - } -} - -func (nz *NuclioZap) addContextToVars(ctx context.Context, vars []interface{}) []interface{} { - if ctx == nil { - return vars - } - - // get request ID from context - requestID := ctx.Value("RequestID") - - // if not set, don't add it to vars - if requestID == nil || requestID == "" { - return vars - } - - // create a slice 2 slots larger - varsWithContext := make([]interface{}, 0, len(vars)+2) - varsWithContext = append(varsWithContext, "requestID") - varsWithContext = append(varsWithContext, requestID) - varsWithContext = append(varsWithContext, vars...) 
- - return varsWithContext -} - -func (nz *NuclioZap) getFormatWithContext(ctx context.Context, format interface{}) string { - formatString := format.(string) - - // get request ID from context - requestID := ctx.Value("RequestID") - - // if not set, don't add it to vars - if requestID == nil || requestID == "" { - return formatString - } - - return formatString + fmt.Sprintf(" (requestID: %s)", requestID) -} - -func (nz *NuclioZap) prepareVars(vars []interface{}) []interface{} { - if nz.encoding != "json" || nz.customEncoderConfig == nil || nz.customEncoderConfig.JSON.VarGroupName == "" { - return vars - } - - // must be an even number of parameters - if len(vars)&0x1 != 0 { - panic("Odd number of logging vars - must be key/value") - } - - formattedVars := "" - - // create key=value pairs - for varIndex := 0; varIndex < len(vars); varIndex += 2 { - formattedVars += fmt.Sprintf("%s=%+v || ", vars[varIndex], vars[varIndex+1]) - } - - // if nothing was created, don't generate a group - if len(formattedVars) == 0 { - return []interface{}{} - } - - return []interface{}{ - nz.customEncoderConfig.JSON.VarGroupName, - formattedVars[:len(formattedVars)-4], - } -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go deleted file mode 100644 index 2747b416..00000000 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nucliozap - -import ( - "context" - - "github.com/nuclio/logger" -) - -// a logger that multiplexes logs towards multiple loggers -type MuxLogger struct { - loggers []logger.Logger -} - -func NewMuxLogger(loggers ...logger.Logger) (*MuxLogger, error) { - return &MuxLogger{loggers: loggers}, nil -} - -func (ml *MuxLogger) SetLoggers(loggers ...logger.Logger) { - ml.loggers = loggers -} - -func (ml *MuxLogger) Error(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Error(format, vars...) - } -} - -func (ml *MuxLogger) ErrorCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.ErrorCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) Warn(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Warn(format, vars...) - } -} - -func (ml *MuxLogger) WarnCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.WarnCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) Info(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Info(format, vars...) - } -} - -func (ml *MuxLogger) InfoCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.InfoCtx(ctx, format, vars...) 
- } -} - -func (ml *MuxLogger) Debug(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Debug(format, vars...) - } -} - -func (ml *MuxLogger) DebugCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.DebugCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) ErrorWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.ErrorWith(format, vars...) - } -} - -func (ml *MuxLogger) ErrorWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.ErrorWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) WarnWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.WarnWith(format, vars...) - } -} - -func (ml *MuxLogger) WarnWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.WarnWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) InfoWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.InfoWith(format, vars...) - } -} - -func (ml *MuxLogger) InfoWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.InfoWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) DebugWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.DebugWith(format, vars...) - } -} - -func (ml *MuxLogger) DebugWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.DebugWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) Flush() { -} - -func (ml *MuxLogger) GetChild(name string) logger.Logger { - return ml -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go index 05157edf..43c9a03d 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go @@ -18,7 +18,6 @@ package v3io // A container interface allows perform actions against a container type Container interface { - // // Container // @@ -77,7 +76,7 @@ type Container interface { PutItem(*PutItemInput, interface{}, chan *Response) (*Request, error) // PutItemSync - PutItemSync(*PutItemInput) error + PutItemSync(*PutItemInput) (*Response, error) // PutItems PutItems(*PutItemsInput, interface{}, chan *Response) (*Request, error) @@ -89,7 +88,7 @@ type Container interface { UpdateItem(*UpdateItemInput, interface{}, chan *Response) (*Request, error) // UpdateItemSync - UpdateItemSync(*UpdateItemInput) error + UpdateItemSync(*UpdateItemInput) (*Response, error) // // Stream diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go index 5a9455df..986ac8b1 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go @@ -16,9 +16,14 @@ limitations under the License. 
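[Editor's note] The Container interface change above is breaking: PutItemSync and UpdateItemSync now return (*v3io.Response, error) instead of a bare error, so callers can read the modification time that the new code parses out of the response (see the PutItemOutput and UpdateItemOutput types later in this patch). A minimal caller-side sketch of the migration, assuming an already-populated input and a container obtained from a session (these names are illustrative, not from the patch):

    // old API: err := container.PutItemSync(putItemInput)
    // new API: a response is returned and must be received, even if only err is used
    response, err := container.PutItemSync(putItemInput)
    if err != nil {
        return err
    }
    // the response output now carries the item's modification time
    output := response.Output.(*v3io.PutItemOutput)
    fmt.Printf("item written at %d.%d\n", output.MtimeSecs, output.MtimeNSecs)

Callers that do not need the mtime simply discard the response, which is exactly what the partmgr.go hunks later in this diff do (`_, err := p.container.PutItemSync(input)`).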
package v3io +import "time" + type Context interface { Container // create a new session NewSession(*NewSessionInput) (Session, error) + + // stops a context + Stop(*time.Duration) error } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go index 941029bf..f272ce38 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go @@ -67,7 +67,7 @@ func (c *container) PutItem(putItemInput *v3io.PutItemInput, } // PutItemSync -func (c *container) PutItemSync(putItemInput *v3io.PutItemInput) error { +func (c *container) PutItemSync(putItemInput *v3io.PutItemInput) (*v3io.Response, error) { c.populateInputFields(&putItemInput.DataPlaneInput) return c.session.context.PutItemSync(putItemInput) } @@ -95,7 +95,7 @@ func (c *container) UpdateItem(updateItemInput *v3io.UpdateItemInput, } // UpdateItemSync -func (c *container) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { +func (c *container) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) (*v3io.Response, error) { c.populateInputFields(&updateItemInput.DataPlaneInput) return c.session.context.UpdateItemSync(updateItemInput) } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go index 90d50cea..f475d354 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go @@ -32,12 +32,24 @@ import ( // TODO: Request should have a global pool var requestID uint64 +var ErrContextStopped = errors.New("Context stopped") + +type inactivityMonitorRequest int + +const ( + inactivityMonitorRequestReset inactivityMonitorRequest = 0 + inactivityMonitorRequestStop inactivityMonitorRequest = 1 +) + type context struct { - logger logger.Logger - requestChan chan *v3io.Request - httpClient *fasthttp.Client - clusterEndpoints []string - numWorkers int + logger logger.Logger + requestChan chan *v3io.Request + httpClient *fasthttp.Client + clusterEndpoints []string + numWorkers int + inactivityMonitorTimer *time.Timer + inactivityMonitorChan chan inactivityMonitorRequest + inactivityMonitorTimeout time.Duration } func NewClient(tlsConfig *tls.Config, dialTimeout time.Duration) *fasthttp.Client { @@ -48,6 +60,7 @@ func NewClient(tlsConfig *tls.Config, dialTimeout time.Duration) *fasthttp.Clien if dialTimeout == 0 { dialTimeout = fasthttp.DefaultDialTimeout } + dialFunction := func(addr string) (net.Conn, error) { return fasthttp.DialTimeout(addr, dialTimeout) } @@ -74,19 +87,40 @@ func NewContext(parentLogger logger.Logger, client *fasthttp.Client, newContextI } newContext := &context{ - logger: parentLogger.GetChild("context.http"), - httpClient: client, - requestChan: make(chan *v3io.Request, requestChanLen), - numWorkers: numWorkers, + logger: parentLogger.GetChild("context.http"), + httpClient: client, + requestChan: make(chan *v3io.Request, requestChanLen), + numWorkers: numWorkers, + inactivityMonitorTimeout: 
newContextInput.InactivityTimeout, } for workerIndex := 0; workerIndex < numWorkers; workerIndex++ { go newContext.workerEntry(workerIndex) } + if newContext.inactivityMonitorTimeout != 0 { + newContext.inactivityMonitorChan = make(chan inactivityMonitorRequest, newContext.numWorkers) + newContext.inactivityMonitorTimer = time.NewTimer(newContext.inactivityMonitorTimeout) + + go newContext.inactivityMonitorEntry() + } + + newContext.logger.DebugWith("Created context", + "numWorkers", numWorkers, + "inactivityMonitorTimeout", newContextInput.InactivityTimeout) + return newContext, nil } +// stops a context +func (c *context) Stop(timeout *time.Duration) error { + if c.inactivityMonitorTimer != nil { + c.inactivityMonitorChan <- inactivityMonitorRequestStop + } + + return c.stop("User requested stop", timeout) +} + // create a new session func (c *context) NewSession(newSessionInput *v3io.NewSessionInput) (v3io.Session, error) { return newSession(c.logger, @@ -308,7 +342,7 @@ func (c *context) PutItem(putItemInput *v3io.PutItemInput, } // PutItemSync -func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) error { +func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) (*v3io.Response, error) { var body map[string]interface{} if putItemInput.UpdateMode != "" { body = map[string]interface{}{ @@ -317,7 +351,7 @@ func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) error { } // prepare the query path - _, err := c.putItem(&putItemInput.DataPlaneInput, + response, err := c.putItem(&putItemInput.DataPlaneInput, putItemInput.Path, putItemFunctionName, putItemInput.Attributes, @@ -325,7 +359,13 @@ func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) error { putItemHeaders, body) - return err + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.PutItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + + return response, err } // PutItems @@ -386,8 +426,9 @@ func (c *context) UpdateItem(updateItemInput *v3io.UpdateItemInput, } // UpdateItemSync -func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { +func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) (*v3io.Response, error) { var err error + var response *v3io.Response if updateItemInput.Attributes != nil { @@ -400,7 +441,7 @@ func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { body["UpdateMode"] = updateItemInput.UpdateMode } - _, err = c.putItem(&updateItemInput.DataPlaneInput, + response, err = c.putItem(&updateItemInput.DataPlaneInput, updateItemInput.Path, putItemFunctionName, updateItemInput.Attributes, @@ -408,18 +449,31 @@ func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { putItemHeaders, body) + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.UpdateItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + } else if updateItemInput.Expression != nil { - _, err = c.updateItemWithExpression(&updateItemInput.DataPlaneInput, + response, err = c.updateItemWithExpression(&updateItemInput.DataPlaneInput, updateItemInput.Path, updateItemFunctionName, *updateItemInput.Expression, updateItemInput.Condition, updateItemHeaders, updateItemInput.UpdateMode) + + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.UpdateItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + } - return err + return 
response, err } // GetObject @@ -764,7 +818,6 @@ func (c *context) updateItemWithExpression(dataPlaneInput *v3io.DataPlaneInput, body["UpdateMode"] = updateMode } - if condition != "" { body["ConditionExpression"] = condition } @@ -822,6 +875,11 @@ func (c *context) sendRequest(dataPlaneInput *v3io.DataPlaneInput, var statusCode int var err error + // if there's an inactivity timer, reset it + if c.inactivityMonitorTimer != nil { + c.inactivityMonitorChan <- inactivityMonitorRequestReset + } + if dataPlaneInput.ContainerName == "" { return nil, errors.New("ContainerName must not be empty") } @@ -919,7 +977,7 @@ func (c *context) buildRequestURI(urlString string, containerName string, query if strings.HasSuffix(pathStr, "/") { uri.Path += "/" // retain trailing slash } - uri.RawQuery = strings.Replace(query, " ", "%20", -1) + uri.RawQuery = strings.ReplaceAll(query, " ", "%20") return uri, nil } @@ -1071,63 +1129,79 @@ func (c *context) sendRequestToWorker(input interface{}, func (c *context) workerEntry(workerIndex int) { for { - var response *v3io.Response - var err error - - // read a request request := <-c.requestChan - // according to the input type - switch typedInput := request.Input.(type) { - case *v3io.PutObjectInput: - err = c.PutObjectSync(typedInput) - case *v3io.GetObjectInput: - response, err = c.GetObjectSync(typedInput) - case *v3io.DeleteObjectInput: - err = c.DeleteObjectSync(typedInput) - case *v3io.GetItemInput: - response, err = c.GetItemSync(typedInput) - case *v3io.GetItemsInput: - response, err = c.GetItemsSync(typedInput) - case *v3io.PutItemInput: - err = c.PutItemSync(typedInput) - case *v3io.PutItemsInput: - response, err = c.PutItemsSync(typedInput) - case *v3io.UpdateItemInput: - err = c.UpdateItemSync(typedInput) - case *v3io.CreateStreamInput: - err = c.CreateStreamSync(typedInput) - case *v3io.DeleteStreamInput: - err = c.DeleteStreamSync(typedInput) - case *v3io.GetRecordsInput: - response, err = c.GetRecordsSync(typedInput) - case *v3io.PutRecordsInput: - response, err = c.PutRecordsSync(typedInput) - case *v3io.SeekShardInput: - response, err = c.SeekShardSync(typedInput) - case *v3io.GetContainersInput: - response, err = c.GetContainersSync(typedInput) - case *v3io.GetContainerContentsInput: - response, err = c.GetContainerContentsSync(typedInput) - default: - c.logger.ErrorWith("Got unexpected request type", "type", reflect.TypeOf(request.Input).String()) + if err := c.handleRequest(workerIndex, request); err != nil { + if err == ErrContextStopped { + return + } } + } +} - // TODO: have the sync interfaces somehow use the pre-allocated response - if response != nil { - request.RequestResponse.Response = *response +func (c *context) handleRequest(workerIndex int, request *v3io.Request) error { + var response *v3io.Response + var err error + + // according to the input type + switch typedInput := request.Input.(type) { + case *v3io.PutObjectInput: + err = c.PutObjectSync(typedInput) + case *v3io.GetObjectInput: + response, err = c.GetObjectSync(typedInput) + case *v3io.DeleteObjectInput: + err = c.DeleteObjectSync(typedInput) + case *v3io.GetItemInput: + response, err = c.GetItemSync(typedInput) + case *v3io.GetItemsInput: + response, err = c.GetItemsSync(typedInput) + case *v3io.PutItemInput: + response, err = c.PutItemSync(typedInput) + case *v3io.PutItemsInput: + response, err = c.PutItemsSync(typedInput) + case *v3io.UpdateItemInput: + response, err = c.UpdateItemSync(typedInput) + case *v3io.CreateStreamInput: + err = c.CreateStreamSync(typedInput) 
+	case *v3io.DeleteStreamInput: + err = c.DeleteStreamSync(typedInput) + case *v3io.GetRecordsInput: + response, err = c.GetRecordsSync(typedInput) + case *v3io.PutRecordsInput: + response, err = c.PutRecordsSync(typedInput) + case *v3io.SeekShardInput: + response, err = c.SeekShardSync(typedInput) + case *v3io.GetContainersInput: + response, err = c.GetContainersSync(typedInput) + case *v3io.GetContainerContentsInput: + response, err = c.GetContainerContentsSync(typedInput) + case *v3io.StopContextInput: + response = &v3io.Response{ + Output: &v3io.StopContextOutput{ + WorkerIndex: workerIndex, + }, } + err = ErrContextStopped + default: + c.logger.ErrorWith("Got unexpected request type", "type", reflect.TypeOf(request.Input).String()) + } - // TODO: have the sync interfaces somehow use the pre-allocated response - if response != nil { - request.RequestResponse.Response = *response + // TODO: have the sync interfaces somehow use the pre-allocated response + if response != nil { + request.RequestResponse.Response = *response } - response = &request.RequestResponse.Response + response = &request.RequestResponse.Response - response.ID = request.ID - response.Error = err - response.RequestResponse = request.RequestResponse - response.Context = request.Context + response.ID = request.ID + response.Error = err + response.RequestResponse = request.RequestResponse + response.Context = request.Context - // write to response channel - request.ResponseChan <- &request.RequestResponse.Response - } + // write to response channel + request.ResponseChan <- &request.RequestResponse.Response + + return err } func readAllCapnpMessages(reader io.Reader) []*capnp.Message { @@ -1204,7 +1278,7 @@ func decodeCapnpAttributes(keyValues node_common_capnp.VnObjectItemsGetMappedKey func (c *context) getItemsParseJSONResponse(response *v3io.Response, getItemsInput *v3io.GetItemsInput) (*v3io.GetItemsOutput, error) { getItemsResponse := struct { - Items []map[string]map[string]interface{} + Items []map[string]map[string]interface{} NextMarker string LastItemIncluded string }{} @@ -1345,3 +1419,108 @@ func (c *context) getItemsParseCAPNPResponse(response *v3io.Response, withWildca } return &getItemsOutput, nil } + +func (c *context) inactivityMonitorEntry() { + c.logger.DebugWith("Inactivity monitor starting", + "timeout", c.inactivityMonitorTimeout) + + inactivityMonitorTimerExpired := false + + for !inactivityMonitorTimerExpired { + select { + case request := <-c.inactivityMonitorChan: + switch request { + case inactivityMonitorRequestStop: + c.logger.Debug("Inactivity monitor requested to stop") + return + case inactivityMonitorRequestReset: + c.inactivityMonitorTimer.Reset(c.inactivityMonitorTimeout) + } + + case <-c.inactivityMonitorTimer.C: + inactivityMonitorTimerExpired = true + } + } + + // force stop + c.stop("Inactivity timeout expired", nil) // nolint: errcheck +} + +func (c *context) stop(reason string, timeout *time.Duration) error { + var workerStoppedChan chan *v3io.Response + + timeoutStr := "None" + if timeout != nil { + timeoutStr = timeout.String() + } + + c.logger.DebugWith("Stopping context", + "reason", reason, + "timeout", timeoutStr) + + workerStoppedChan = make(chan *v3io.Response, c.numWorkers) + + // it's guaranteed that a single worker will not read two messages from the queue, so + // each worker should receive a single stop request + for workerIdx := 0; workerIdx < c.numWorkers; workerIdx++ { + _, err := c.sendRequestToWorker(&v3io.StopContextInput{Reason: reason}, + nil, + workerStoppedChan) + + if err != nil { + return errors.Wrap(err, "Failed to send request to worker") + } + } + + // if timeout is set, wait for all workers
to stop + if timeout != nil { + deadline := time.After(*timeout) + workersStopped := 0 + + // while not all workers stopped, wait for them to stop + for workersStopped < c.numWorkers { + select { + case <-workerStoppedChan: + workersStopped++ + case <-deadline: + return errors.New("Timed out waiting for context to stop") + } + } + } + + c.logger.DebugWith("Context stopped") + + return nil +} + +// parsing the mtime from a header of the form `__mtime_secs==1581605100 and __mtime_nsecs==498349956` +func parseMtimeHeader(response *v3io.Response) (int, int, error) { + var mtimeSecs, mtimeNSecs int + var err error + + mtimeHeader := string(response.HeaderPeek("X-v3io-transaction-verifier")) + for _, expression := range strings.Split(mtimeHeader, "and") { + mtimeParts := strings.Split(expression, "==") + mtimeType := strings.TrimSpace(mtimeParts[0]) + if mtimeType == "__mtime_secs" { + mtimeSecs, err = trimAndParseInt(mtimeParts[1]) + if err != nil { + return 0, 0, err + } + } else if mtimeType == "__mtime_nsecs" { + mtimeNSecs, err = trimAndParseInt(mtimeParts[1]) + if err != nil { + return 0, 0, err + } + } else { + return 0, 0, fmt.Errorf("failed to parse 'X-v3io-transaction-verifier', unexpected symbol '%v' ", mtimeType) + } + } + + return mtimeSecs, mtimeNSecs, nil +} + +func trimAndParseInt(str string) (int, error) { + trimmed := strings.TrimSpace(str) + return strconv.Atoi(trimmed) +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go index 56662277..599000d5 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go @@ -31,9 +31,18 @@ import ( // type NewContextInput struct { - Client *fasthttp.Client - NumWorkers int - RequestChanLen int + Client *fasthttp.Client + NumWorkers int + RequestChanLen int + InactivityTimeout time.Duration +} + +type StopContextInput struct { + Reason string +} + +type StopContextOutput struct { + WorkerIndex int } type NewSessionInput struct { @@ -192,6 +201,12 @@ type PutItemInput struct { UpdateMode string } +type PutItemOutput struct { + DataPlaneInput + MtimeSecs int + MtimeNSecs int +} + type PutItemsInput struct { DataPlaneInput Path string @@ -214,6 +229,12 @@ type UpdateItemInput struct { UpdateMode string } +type UpdateItemOutput struct { + DataPlaneInput + MtimeSecs int + MtimeNSecs int +} + type GetItemInput struct { DataPlaneInput Path string diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt index 8020c644..fdafad5e 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt @@ -65,7 +65,7 @@ github.com/stretchr/testify/require # github.com/v3io/frames v0.6.8-v0.9.11 github.com/v3io/frames github.com/v3io/frames/pb -# github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 +# github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d github.com/v3io/v3io-go/pkg/dataplane github.com/v3io/v3io-go/pkg/errors github.com/v3io/v3io-go/pkg/dataplane/http diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/Makefile b/functions/query/vendor/github.com/v3io/v3io-tsdb/Makefile index 4a6e480e..73465498 100644 
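[Editor's note] Taken together, the dataplane changes above add a managed shutdown path: NewContextInput gains an InactivityTimeout, each worker answers a StopContextInput with a StopContextOutput carrying its index, and Context.Stop(*time.Duration) drains the workers. A rough usage sketch under stated assumptions (the http dataplane package is imported as v3iohttp, and client/parentLogger already exist; none of these caller-side names are fixed by the patch):

    // a context with a 5-minute inactivity monitor; a zero timeout disables it
    ctx, err := v3iohttp.NewContext(parentLogger, client, &v3io.NewContextInput{
        NumWorkers:        8,
        InactivityTimeout: 5 * time.Minute,
    })
    if err != nil {
        return err
    }

    // explicit stop: waits up to 10s for every worker to acknowledge the stop
    // request, and errors out if the deadline passes first (nil timeout = no wait)
    timeout := 10 * time.Second
    defer ctx.Stop(&timeout) // nolint: errcheck

The same stop path is what the inactivity monitor triggers on its own when no request has reset its timer within InactivityTimeout, and parseMtimeHeader above is what populates the new mtime outputs from the `X-v3io-transaction-verifier` response header.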
--- a/functions/query/vendor/github.com/v3io/v3io-tsdb/Makefile +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/Makefile @@ -32,15 +32,7 @@ BUILD_OPTS := -ldflags " \ -X $(CONFIG_PKG).branch=$(GIT_BRANCH)" \ -v -o "$(GOPATH)/bin/$(TSDBCTL_BIN_NAME)" -TSDB_BUILD_COMMAND ?= GO111MODULE="on" CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl - -.PHONY: fmt -fmt: - gofmt -l -s -w . - -.PHONY: get -get: - GO111MODULE="on" go mod tidy +TSDB_BUILD_COMMAND ?= CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl .PHONY: test test: diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/go.mod b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.mod index 1b88ca5d..0f30f3fd 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/go.mod +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.mod @@ -7,12 +7,11 @@ require ( github.com/cpuguy83/go-md2man v1.0.10 // indirect github.com/ghodss/yaml v1.0.0 github.com/imdario/mergo v0.3.7 - github.com/kr/pretty v0.2.0 // indirect + github.com/kr/pretty v0.1.0 // indirect github.com/nuclio/logger v0.0.1 github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324 github.com/nuclio/nuclio-test-go v0.0.0-20180704132150-0ce6587f8e37 github.com/nuclio/zap v0.0.2 - github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b // indirect github.com/pkg/errors v0.8.1 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a github.com/spf13/cobra v0.0.3 @@ -20,7 +19,7 @@ require ( github.com/stretchr/testify v1.4.0 github.com/tinylib/msgp v1.1.1 // indirect github.com/v3io/frames v0.6.8-v0.9.11 - github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 + github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d github.com/v3io/v3io-go-http v0.0.0-20190415143924-cc2fbcde6663 // indirect github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9 // indirect diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/go.sum b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.sum index 9b50f98e..75aa4c29 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/go.sum +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/go.sum @@ -34,8 +34,6 @@ github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -63,8 +61,6 @@ github.com/nuclio/nuclio-test-go v0.0.0-20180704132150-0ce6587f8e37/go.mod h1:aO github.com/nuclio/zap v0.0.0-20180228181516-4a2bd2f9ef28/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= github.com/nuclio/zap v0.0.2 h1:rY5PkMOl8CTkqRqIPuxziBiKK6Mq/8oEurfgRnNtqf0= github.com/nuclio/zap v0.0.2/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= -github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b h1:yS0+/i6mwRZCdssUd+MkFJkCn/Evh1PlUKCYe3aCtQw= -github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod 
h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8 h1:1N/m7VjDY1Pd30Uwv6bLttZVFQm3n8RUK9Ylf2J+4a4= github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8/go.mod h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8 h1:WqLgmr/wj9TO5Sc6oYPQRAJBxuHE0NTeuVeFnT+FZVo= @@ -80,9 +76,9 @@ github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9 github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/russross/blackfriday v1.5.2+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2+incompatible h1:/YIL6L1Deczl4O/cQ7ZVdrdKwuB6y7EWpw9LkD8xofE= +github.com/russross/blackfriday v1.5.2+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -108,10 +104,9 @@ github.com/v3io/frames v0.6.8-v0.9.11/go.mod h1:V3j8yjzhNNGXjosCBn7Qf8C8jo25Y+7G github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 h1:myF4tU/HdFWU1UzMdf16cHRbownzsyvL7VKIHqkrSvo= github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871/go.mod h1:QD2Bo64oyTWzeV8RFehXS0hZEDFgOK99/h2a6ErRu6E= github.com/v3io/v3io-go v0.0.0-20191024084247-042df6b5ee40eb60996ab7f4e74ec9aa07d996c4/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= -github.com/v3io/v3io-go v0.0.0-20191120130819-9003ae83f0b673afb88b862d8f46dcc818684450 h1:3JMzABqziU+dBO4NCoIGRhI/NGYPd6d6Zug68nTXQkU= github.com/v3io/v3io-go v0.0.0-20191120130819-9003ae83f0b673afb88b862d8f46dcc818684450/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= -github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 h1:+52DyMCjcWg6uXAlTe0KgbOsiQqUKrtL9tBPSERhyFg= -github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= +github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d h1:OotbIx7+QYju2DlAAVxWz0QFzBicHLc47u9DJGpVUL4= +github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2 h1:NJc63wM25iS+ci5z7LVwjWD4QM0QpTQw/fovKzatss0= github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2/go.mod h1:GXYcR9MxgfbE3BJdkXki5EclvtS8Nxu2RQNLA8hMMog= github.com/v3io/v3io-go-http v0.0.0-20190415143924-cc2fbcde6663 h1:WZcM/GRBAastacksmv5pODbtr8fJ/0/9EsPDpPfXkRk= diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go index 614e095b..8e3fa0d3 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go @@ -122,9 +122,7 @@ type MetricsCache struct { updatesComplete chan int newUpdates chan int - lastMetric uint64 - 
- // TODO: consider switching to synch.Map (https://golang.org/pkg/sync/#Map) + lastMetric uint64 cacheMetricMap map[cacheKey]*MetricState // TODO: maybe use hash as key & combine w ref cacheRefMap map[uint64]*MetricState // TODO: maybe turn to list + free list, periodically delete old matrics @@ -220,15 +218,6 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 return 0, err } - isValueVariantType := false - // If the value is not of Float type assume it's variant type. - switch v.(type) { - case int, int64, float64, float32: - isValueVariantType = false - default: - isValueVariantType = true - } - name, key, hash := lset.GetKey() err = utils.IsValidMetricName(name) if err != nil { @@ -249,9 +238,11 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 aggrMetrics = append(aggrMetrics, aggrMetric) } } - metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, - aggrs: aggrMetrics, isVariant: isValueVariantType} - + metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, aggrs: aggrMetrics} + // if the (first) value is not float, use variant encoding, TODO: test w schema + if _, ok := v.(float64); !ok { + metric.isVariant = true + } metric.store = NewChunkStore(mc.logger, lset.LabelNames(), false) mc.addMetric(hash, name, metric) } else { @@ -261,18 +252,6 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 err = metric.error() metric.setError(nil) - if isValueVariantType != metric.isVariant { - newValueType := "numeric" - if isValueVariantType { - newValueType = "string" - } - existingValueType := "numeric" - if metric.isVariant { - existingValueType = "string" - } - return 0, errors.Errorf("Cannot append %v type metric to %v type metric.", newValueType, existingValueType) - } - mc.appendTV(metric, t, v) for _, aggrMetric := range aggrMetrics { mc.appendTV(aggrMetric, t, v) diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go index 2d82b0af..11faf119 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go @@ -29,7 +29,6 @@ import ( "github.com/pkg/errors" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-go/pkg/errors" - "github.com/v3io/v3io-tsdb/pkg/utils" ) // Start event loops for handling metric updates (appends and Get/Update DB responses) @@ -271,7 +270,7 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, if resp.Error != nil && metric.getState() != storeStateGet { req := reqInput.(*v3io.UpdateItemInput) - mc.logger.WarnWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, + mc.logger.ErrorWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, "in-flight", mc.updatesInFlight, "mqueue", mc.metricQueue.Length(), "numsamples", metric.store.samplesQueueLength(), "path", req.Path, "update expression", req.Expression) } else { @@ -305,17 +304,8 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, // Metrics with too many update errors go into Error state metric.retryCount++ if e, hasStatusCode := resp.Error.(v3ioerrors.ErrorWithStatusCode); hasStatusCode && e.StatusCode() != http.StatusServiceUnavailable { - // If condition was evaluated as false log this and report this error upstream. 
- if utils.IsFalseConditionError(resp.Error) { - req := reqInput.(*v3io.UpdateItemInput) - // This might happen on attempt to add metric value of wrong type, i.e. float <-> string - errMsg := fmt.Sprintf("trying to ingest values of incompatible data type. Metric %q has not been updated.", req.Path) - mc.logger.ErrorWith(errMsg) - setError(mc, metric, errors.Wrap(resp.Error, errMsg)) - } else { - mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) - setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) - } + mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) + setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) clear() return false } else if metric.retryCount == maxRetriesOnWrite { diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go index 68cfa292..b9387adf 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go @@ -418,20 +418,17 @@ func (cs *chunkStore) writeChunks(mc *MetricsCache, metric *MetricState) (hasPen var encodingExpr string if !cs.isAggr() { - encodingExpr = fmt.Sprintf("%s='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) + encodingExpr = fmt.Sprintf("%v='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) } - lsetExpr := fmt.Sprintf("%s='%s'; ", config.LabelSetAttrName, metric.key) + lsetExpr := fmt.Sprintf("%v='%s'; ", config.LabelSetAttrName, metric.key) expr = lblexpr + encodingExpr + lsetExpr + expr } // Call the V3IO async UpdateItem method - conditionExpr := fmt.Sprintf("NOT exists(%s) OR (exists(%s) AND %s == '%d')", - config.EncodingAttrName, config.EncodingAttrName, - config.EncodingAttrName, activeChunk.appender.Encoding()) expr += fmt.Sprintf("%v=%d;", config.MaxTimeAttrName, cs.maxTime) // TODO: use max() expr path := partition.GetMetricPath(metric.name, metric.hash, cs.labelNames, cs.isAggr()) request, err := mc.container.UpdateItem( - &v3io.UpdateItemInput{Path: path, Expression: &expr, Condition: conditionExpr}, metric, mc.responseChan) + &v3io.UpdateItemInput{Path: path, Expression: &expr}, metric, mc.responseChan) if err != nil { mc.logger.ErrorWith("UpdateItem failed", "err", err) hasPendingUpdates = false diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go index 7d7845e9..ae13c04f 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go @@ -108,18 +108,36 @@ func (a *varAppender) Chunk() Chunk { } func (a *varAppender) Append(t int64, v interface{}) { + if v == nil { a.appendNoValue(t, varTypeNil, varValueNone) return } - switch val := v.(type) { + switch vType := v.(type) { + case float64: + val := v.(float64) + if val == 0 { + a.appendNoValue(t, varTypeFloat64, varValueZero) + return + + } + + if math.IsNaN(val) { + a.appendNoValue(t, varTypeFloat64, varValueNone) + return + } + + a.appendWithUint(t, varTypeFloat64, math.Float64bits(val)) + case string: - a.appendWithValue(t, varTypeString, []byte(val)) + val := []byte(v.(string)) + a.appendWithValue(t, varTypeString, val) default: - 
a.logger.Error("unsupported type %T of value %v\n", v, v) + a.logger.Error("unsupported type %v of value %v\n", vType, v) } + } func (a *varAppender) appendNoValue(t int64, varType, varVal byte) { diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go index 06e8df5c..44029b5f 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go @@ -173,18 +173,7 @@ func (a *xorAppender) Chunk() Chunk { func (a *xorAppender) Append(t int64, vvar interface{}) { var tDelta uint64 num := *a.samples - - var v float64 - switch typedValue := vvar.(type) { - case int: - v = float64(typedValue) - case float64: - v = typedValue - default: - a.logger.Warn("Discarding sample {time: %d, value: %v}, as it's value is of incompatible data type. "+ - "Reason: expected 'float' actual '%T'.", t, vvar, vvar) - return - } + v := vvar.(float64) // Do not append if sample is too old. if t < a.t { diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go index d2c51e19..48d2bcbc 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go @@ -62,14 +62,16 @@ const ( DefaultUseServerAggregateCoefficient = 3 // KV attribute names - MaxTimeAttrName = "_maxtime" - LabelSetAttrName = "_lset" - EncodingAttrName = "_enc" - OutOfOrderAttrName = "_ooo" - MetricNameAttrName = "_name" - ObjectNameAttrName = "__name" - ChunkAttrPrefix = "_v" - AggregateAttrPrefix = "_v_" + MaxTimeAttrName = "_maxtime" + LabelSetAttrName = "_lset" + EncodingAttrName = "_enc" + OutOfOrderAttrName = "_ooo" + MetricNameAttrName = "_name" + ObjectNameAttrName = "__name" + ChunkAttrPrefix = "_v" + AggregateAttrPrefix = "_v_" + MtimeSecsAttributeName = "__mtime_secs" + MtimeNSecsAttributeName = "__mtime_nsecs" PrometheusMetricNameAttribute = "__name__" diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go index 8a101bcb..f82ed68a 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go @@ -198,7 +198,7 @@ func (p *PartitionManager) updateSchema() error { } input := &v3io.PutItemInput{Path: schemaFilePath, Attributes: attributes} - err := p.container.PutItemSync(input) + _, err := p.container.PutItemSync(input) if err != nil { outerError = errors.Wrap(err, "failed to update partitions table.") @@ -238,7 +238,7 @@ func (p *PartitionManager) DeletePartitionsFromSchema(partitionsToDelete []*DBPa deletePartitionExpression.WriteString(");") } expression := deletePartitionExpression.String() - err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) + _, err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) if err != nil { return err } @@ -592,6 +592,33 @@ func (p *DBPartition) Time2Bucket(t int64) int { return int((t - p.startTime) / p.rollupTime) } +// Return the start time of an aggregation bucket by id +func (p *DBPartition) GetAggregationBucketStartTime(id int) int64 { + return p.startTime + int64(id)*p.rollupTime +} + +// Return the end time of an aggregation 
bucket by id +func (p *DBPartition) GetAggregationBucketEndTime(id int) int64 { + return p.startTime + int64(id+1)*p.rollupTime - 1 +} + +func (p *DBPartition) Times2BucketRange(start, end int64) []int { + var buckets []int + + if start > p.GetEndTime() || end < p.startTime { + return buckets + } + + startingAggrBucket := p.Time2Bucket(start) + endAggrBucket := p.Time2Bucket(end) + + for bucketID := startingAggrBucket; bucketID <= endAggrBucket; bucketID++ { + buckets = append(buckets, bucketID) + } + + return buckets +} + // Return the nearest chunk start time for the specified time func (p *DBPartition) GetChunkMint(t int64) int64 { if t > p.GetEndTime() { @@ -622,6 +649,37 @@ func (p *DBPartition) TimeToChunkId(tmilli int64) (int, error) { } } +// Check if a chunk (by attribute name) is in the given time range. +func (p *DBPartition) IsChunkInRangeByAttr(attr string, mint, maxt int64) bool { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return false + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + chunkEndTime := chunkStartTime + p.chunkInterval - 1 + + return mint <= chunkStartTime && maxt >= chunkEndTime +} + +// Get a chunk's start time by its attribute name +func (p *DBPartition) GetChunkStartTimeByAttr(attr string) (int64, error) { + + // Discard '_v' prefix + chunkIDStr := attr[2:] + chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) + if err != nil { + return 0, err + } + + chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval + + return chunkStartTime, nil +} + // Check whether the specified time is within the range of this partition func (p *DBPartition) InRange(t int64) bool { if p.manager.cyclic { diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go index 28030d2a..ce214746 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go @@ -85,26 +85,11 @@ func (suite *testRawChunkIterSuite) TestRawChunkIteratorWithZeroValue() { prevT, prevV := iter.PeakBack() suite.Require().Equal(ingestData[index].Time, t, "current time does not match") - - switch val := ingestData[index].Value.(type) { - case float64: - suite.Require().Equal(val, v, "current value does not match") - case int: - suite.Require().Equal(float64(val), v, "current value does not match") - default: - suite.Require().Equal(val, v, "current value does not match") - } + suite.Require().Equal(ingestData[index].Value, v, "current value does not match") if index > 0 { suite.Require().Equal(ingestData[index-1].Time, prevT, "current time does not match") - switch val := ingestData[index-1].Value.(type) { - case float64: - suite.Require().Equal(val, prevV, "current value does not match") - case int: - suite.Require().Equal(float64(val), prevV, "current value does not match") - default: - suite.Require().Equal(val, prevV, "current value does not match") - } + suite.Require().Equal(ingestData[index-1].Value, prevV, "current value does not match") } index++ } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go index 232fc610..35e95d10 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go +++ 
b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go @@ -249,10 +249,7 @@ func downsampleRawData(ctx *selectQueryContext, res *qryResults, func aggregateClientAggregatesCrossSeries(ctx *selectQueryContext, res *qryResults, previousPartitionLastTime int64, previousPartitionLastValue float64) (int64, float64, error) { ctx.logger.Debug("using Client Aggregates Collector for metric %v", res.name) - it, ok := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) - if !ok { - return previousPartitionLastTime, previousPartitionLastValue, nil - } + it := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) var previousPartitionEndBucket int if previousPartitionLastTime != 0 { diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go index 45b8724e..efb5fd5d 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go @@ -80,7 +80,7 @@ func (suite *testClientAggregatesSuite) TestQueryAggregateWithNameWildcard() { suite.T().Fatal(err) } - suite.compareMultipleMetrics(data, expected, metricName, aggr) + assert.Equal(suite.T(), expected[metricName][aggr], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expectedData)*len(expected), seriesCount, "series count didn't match expected") @@ -139,7 +139,7 @@ func (suite *testClientAggregatesSuite) TestQueryAggregateWithFilterOnMetricName suite.T().Fatal(err) } - suite.compareMultipleMetrics(data, expected, metricName, aggr) + assert.Equal(suite.T(), expected[metricName][aggr], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -195,7 +195,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesSinglePartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -219,8 +219,8 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { tsdbtest.TestOption{ Key: tsdbtest.OptTimeSeries, Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Labels: labels1, Name: "cpu", + Labels: labels1, Data: ingestedData}, }}) tsdbtest.InsertData(suite.T(), testParams) @@ -255,7 +255,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -315,7 +315,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionNonCon suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -371,7 +371,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionOneSte suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + 
assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -473,7 +473,7 @@ func (suite *testClientAggregatesSuite) TestSelectAggregatesByRequestedColumns() suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -530,7 +530,7 @@ func (suite *testClientAggregatesSuite) TestSelectAggregatesAndRawByRequestedCol suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -588,7 +588,7 @@ func (suite *testClientAggregatesSuite) TestQueryAllData() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -618,8 +618,7 @@ func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { }}) tsdbtest.InsertData(suite.T(), testParams) - expected := map[string][]tsdbtest.DataPoint{ - "max": {{Time: suite.basicQueryTime, Value: 40}}, + expected := map[string][]tsdbtest.DataPoint{"max": {{Time: suite.basicQueryTime, Value: 40}}, "min": {{Time: suite.basicQueryTime, Value: 10}}, "sum": {{Time: suite.basicQueryTime, Value: 100}}, "count": {{Time: suite.basicQueryTime, Value: 4}}, @@ -647,9 +646,7 @@ func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { suite.T().Fatal(err) } - for i, dataPoint := range expected[agg] { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 4, seriesCount, "series count didn't match expected") @@ -701,7 +698,7 @@ func (suite *testClientAggregatesSuite) TestUsePreciseAggregationsConfig() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(3, seriesCount, "series count didn't match expected") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go index 624ec921..6488677c 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go @@ -82,7 +82,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesTimesFalls if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -152,7 +153,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregates() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) 
+ suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -218,7 +219,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -284,7 +285,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterp suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -358,7 +359,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -442,7 +444,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -509,7 +512,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterp if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -566,7 +570,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSinglePart suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -625,7 +629,8 @@ func (suite *testCrossSeriesAggregatesSuite) TestOnlyVirtualCrossSeriesAggregate if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + suite.Require().Equal(expected[agg], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -694,7 +699,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSameLabelM metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) suite.NoError(err) - suite.compareSingleMetricWithAggregator(data, expected, fmt.Sprintf("%v-%v", agg, metricName)) + suite.Require().Equal(expected[fmt.Sprintf("%v-%v", agg, metricName)], data, "queried data does not match expected") } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -763,7 +768,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesDifferentL data, err := tsdbtest.IteratorToSlice(iter) suite.NoError(err) - suite.compareSingleMetric(data, expected) + suite.Require().Equal(expected, data, "queried data does not 
match expected") } suite.Require().Equal(2, seriesCount, "series count didn't match expected") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go index f52546de..ace74820 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go @@ -3,7 +3,6 @@ package pqueriertest import ( - "errors" "fmt" "math" "strings" @@ -87,17 +86,7 @@ func (suite *testSelectDataframeSuite) TestAggregatesWithZeroStepSelectDataframe currentColAggregate := strings.Split(col.Name(), "(")[0] f, err := col.FloatAt(0) assert.NoError(suite.T(), err) - - var expectedFloat float64 - switch val := expected[currentColAggregate].Value.(type) { - case int: - expectedFloat = float64(val) - case float64: - expectedFloat = val - default: - suite.Failf("invalid data type", "expected int or float, actual type is %t", val) - } - suite.Require().Equal(expectedFloat, f) + suite.Require().Equal(expected[currentColAggregate].Value, f) } } @@ -219,23 +208,11 @@ func (suite *testSelectDataframeSuite) Test2Series1EmptySelectDataframe() { assert.Equal(suite.T(), len(ingestedData), col.Len()) for i := 0; i < col.Len(); i++ { currentExpected := expected[col.Name()][i].Value - switch val := currentExpected.(type) { - case float64: - fv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(fv)) { - assert.Equal(suite.T(), currentExpected, fv) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - sv, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, sv) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) } } } @@ -391,24 +368,11 @@ func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetricsWithMult currentExpectedData := expectedData[fmt.Sprintf("%v-%v", col.Name(), frame.Labels()["os"])] assert.Equal(suite.T(), len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) - switch val := currentExpected.(type) { - case float64: - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(f)) { - assert.Equal(suite.T(), currentExpected, f) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - s, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, s) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) } } } @@ -648,24 +612,11 @@ func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetrics() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) - switch val := currentExpected.(type) { - case float64: - f, err 
:= col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(f)) { - assert.Equal(suite.T(), currentExpected, f) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - s, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, s) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + suite.Require().Equal(currentExpected, f) } } } @@ -748,23 +699,11 @@ func (suite *testSelectDataframeSuite) TestColumnOrder() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value - switch val := currentExpected.(type) { - case float64: - fv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(fv)) { - assert.Equal(suite.T(), currentExpected, fv) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - sv, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, sv) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + suite.Require().Equal(currentExpected, f) } } } @@ -833,24 +772,11 @@ func (suite *testSelectDataframeSuite) TestQueryNonExistingMetric() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) - switch val := currentExpected.(type) { - case float64: - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(val) && math.IsNaN(f)) { - assert.Equal(suite.T(), currentExpected, f) - } - case int: - iv, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), float64(val), iv) - case string: - s, err := col.StringAt(i) - assert.NoError(suite.T(), err) - assert.Equal(suite.T(), val, s) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) + if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { + suite.Require().Equal(currentExpected, f) } } } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go index e2064425..72d2f7ac 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go @@ -120,7 +120,7 @@ func (suite *testDownsampleSuite) TestRawDataSinglePartitionWithDownSample() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -176,7 +176,7 @@ func (suite *testDownsampleSuite) TestRawDataDownSampleMultiPartitions() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't 
match expected") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go index 8346aea1..2d58da27 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go @@ -66,21 +66,3 @@ func (suite *basicQueryTestSuite) TearDownTest() { tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) } } - -func (suite *basicQueryTestSuite) compareSingleMetric(data []tsdbtest.DataPoint, expected []tsdbtest.DataPoint) { - for i, dataPoint := range data { - suite.Require().True(dataPoint.Equals(expected[i]), "queried data does not match expected") - } -} - -func (suite *basicQueryTestSuite) compareSingleMetricWithAggregator(data []tsdbtest.DataPoint, expected map[string][]tsdbtest.DataPoint, agg string) { - for i, dataPoint := range data { - suite.Require().True(dataPoint.Equals(expected[agg][i]), "queried data does not match expected") - } -} - -func (suite *basicQueryTestSuite) compareMultipleMetrics(data []tsdbtest.DataPoint, expected map[string]map[string][]tsdbtest.DataPoint, metricName string, aggr string) { - for i, dataPoint := range data { - suite.Require().True(dataPoint.Equals(expected[metricName][aggr][i]), "queried data does not match expected") - } -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go index cb7e646f..4b63277b 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go @@ -88,7 +88,8 @@ func (suite *testSQLSyntaxQuerySuite) TestGroupByOneLabelSinglePartition() { agg := set.At().Labels().Get(aggregate.AggregateLabel) groupByValue := set.At().Labels().Get("os") suite.Require().NoError(err) - suite.compareMultipleMetrics(data, expected, groupByValue, agg) + + suite.Require().Equal(expected[groupByValue][agg], data, "queried data does not match expected") } suite.Require().Equal(4, seriesCount, "series count didn't match expected") @@ -171,7 +172,8 @@ func (suite *testSQLSyntaxQuerySuite) TestGroupByMultipleLabelsSinglePartition() labelsStr := strings.Join(groupByValue, "-") suite.Require().NoError(err) - suite.compareMultipleMetrics(data, expected, labelsStr, agg) + + suite.Require().Equal(expected[labelsStr][agg], data, "queried data does not match expected") } suite.Require().Equal(6, seriesCount, "series count didn't match expected") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go index 3a61864c..d7c94207 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go @@ -3,7 +3,6 @@ package pqueriertest import ( - "errors" "fmt" "math" "testing" @@ -74,7 +73,7 @@ func (suite *testRawQuerySuite) TestRawDataSinglePartition() { suite.T().Fatal(err) } - 
suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -130,7 +129,7 @@ func (suite *testRawQuerySuite) TestRawDataMultiplePartitions() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -187,7 +186,7 @@ func (suite *testRawQuerySuite) TestFilterOnLabel() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expectedData) + assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -282,9 +281,7 @@ func (suite *testRawQuerySuite) TestSelectRawDataByRequestedColumns() { suite.T().Fatal(err) } - for i, dataPoint := range expected { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -349,9 +346,7 @@ func (suite *testRawQuerySuite) TestRawDataMultipleMetrics() { suite.T().Fatal(err) } - for i, dataPoint := range expectedData[name] { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expectedData[name], data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -497,9 +492,7 @@ func (suite *testRawQuerySuite) TestQueryMultipleMetricsWithMultipleLabelSets() suite.T().Fatal(err) } - for i, dataPoint := range expectedData[fmt.Sprintf("%v-%v", name, os)] { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + assert.Equal(suite.T(), expectedData[fmt.Sprintf("%v-%v", name, os)], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -548,7 +541,7 @@ func (suite *testRawQuerySuite) TestDifferentLabelSetsInDifferentPartitions() { suite.T().Fatal(err) } - suite.compareSingleMetric(data, expected) + suite.Require().Equal(expected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -598,9 +591,7 @@ func (suite *testRawQuerySuite) TestDifferentMetricsInDifferentPartitions() { suite.T().Fatal(err) } - for i, dataPoint := range expected { - suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") - } + suite.Require().Equal(expected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -772,20 +763,7 @@ func (suite *testRawQuerySuite) TestLoadPartitionsFromAttributes() { suite.T().Fatal(err) } - for i := 0; i < len(expectedData); i++ { - assert.Equal(suite.T(), expectedData[i].Time, data[i].Time) - currentExpected := expectedData[i].Value - switch val := currentExpected.(type) { - case float64: - assert.Equal(suite.T(), val, data[i].Value) - case int: - assert.Equal(suite.T(), float64(val), data[i].Value) - case string: - assert.Equal(suite.T(), val, data[i].Value) - default: - assert.Error(suite.T(), errors.New("unsupported data type")) - } - } + assert.Equal(suite.T(), expectedData, data, "queried data does not 
match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go index 4c579fff..811a3c1d 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go @@ -75,7 +75,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -135,7 +135,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartitionNegative suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -200,7 +200,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartition() { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -263,7 +263,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartitionNonConcre suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -319,7 +319,7 @@ func (suite *testServerAggregatesSuite) TestSelectServerAggregatesAndRawByReques suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -375,8 +375,9 @@ func (suite *testServerAggregatesSuite) TestAggregatesWithDisabledClientAggregat if err != nil { suite.T().Fatal(err) } - - suite.compareSingleMetricWithAggregator(data, expected, agg) + currentExpected, ok := expected[agg] + suite.Require().Equal(true, ok, "got unexpected aggregate result") + assert.Equal(suite.T(), currentExpected, data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go index be0a304d..a8943199 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go @@ -80,7 +80,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowBigg suite.T().Fatal(err) } - 
suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -144,7 +144,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowSmal suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -208,7 +208,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowEqua suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -273,7 +273,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowExce suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -337,7 +337,7 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowBigg suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -401,7 +401,7 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqua suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -471,7 +471,8 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqua if err != nil { suite.T().Fatal(err) } - suite.compareSingleMetricWithAggregator(data, expected, agg) + + assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go new file mode 100644 index 00000000..dfe24a51 --- /dev/null +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go @@ -0,0 +1,1141 @@ +// +build integration + +package tsdb_test + +import ( + "fmt" + "math" + "path" + "strconv" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + v3io "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" + "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" + "github.com/v3io/v3io-tsdb/pkg/utils" +) + +func timeStringToMillis(timeStr string) int64 { + ta, _ := time.Parse(time.RFC3339, timeStr) + return ta.Unix() * 1000 +} +func TestDeleteTable(t *testing.T) { + ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") + t1 := ta.Unix() * 1000 + tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") + t2 := tb.Unix() * 1000 + tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") + t3 := tc.Unix() * 1000 + td, _ := time.Parse(time.RFC3339, "now + 1w") + futurePoint := td.Unix() * 1000 + + defaultTimeMillis := timeStringToMillis("2019-07-21T00:00:00Z") + generalData := []tsdbtest.DataPoint{ + // partition 1 + // chunk a + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 2 + // chunk a + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + // partition 3 + // chunk a + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + // chunk b + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}} + partitions1StartTime := timeStringToMillis("2019-07-21T00:00:00Z") + partitions2StartTime := timeStringToMillis("2019-07-23T00:00:00Z") + partitions3StartTime := timeStringToMillis("2019-07-25T00:00:00Z") + + testCases := []struct { + desc string + deleteParams DeleteParams + data tsdbtest.TimeSeries + expectedData map[string][]tsdbtest.DataPoint + expectedPartitions []int64 + ignoreReason string + }{ + {desc: "Should delete all table by time", + deleteParams: DeleteParams{ + From: 0, + To: 9999999999999, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete all table by deleteAll", + deleteParams: DeleteParams{ + From: 0, + To: t1, + DeleteAll: true, + IgnoreErrors: true, + }, + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + }}, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, + }, + {desc: "Should delete whole partitions", + data: 
tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Filter: "os == 'win'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-win": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 
5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole partitions specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions2StartTime - 1, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + tsdbtest.HoursInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + + { + desc: "Should delete partial chunk in the start", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 4*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: 
defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the middle", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 3*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 7*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + "cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk in the end", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{ + 
"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 
1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partial chunk specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime, + To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Filter: "os == 
'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + }, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete mixed partitions and chunks specific metrics with filter", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "cpu", + Labels: utils.LabelsFromStringList("os", "win"), + Data: generalData, + }, tsdbtest.Metric{ + Name: "disk", + Labels: utils.LabelsFromStringList("os", "linux"), + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + tsdbtest.HoursInMillis, + To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, + Metrics: []string{"cpu"}, + Filter: "os == 'linux'", + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "cpu-win": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: 
defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, + "disk-linux": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete partially last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis + 6*tsdbtest.MinuteInMillis, + To: partitions3StartTime + 1*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete whole last chunk and update max time", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, 
+ deleteParams: DeleteParams{ + From: partitions3StartTime + 1*tsdbtest.HoursInMillis, + To: partitions3StartTime + 2*tsdbtest.HoursInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + { + desc: "Should delete all samples in a chunk when the time range does not cover the entire chunk", + data: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "cpu", + Data: generalData, + }}, + deleteParams: DeleteParams{ + From: partitions1StartTime + 1*tsdbtest.HoursInMillis + 2*tsdbtest.MinuteInMillis, + To: partitions1StartTime + 2*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, + }, + expectedData: map[string][]tsdbtest.DataPoint{"cpu": { + {Time: defaultTimeMillis, Value: 1.2}, + {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, + + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, + {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, + expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + if test.ignoreReason != "" { + t.Skip(test.ignoreReason) + } + testDeleteTSDBCase(t, + tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptDropTableOnTearDown, + Value: !test.deleteParams.DeleteAll}, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: 
test.data}, + ), test.deleteParams, test.expectedData, test.expectedPartitions) + }) + } +} + +func getCurrentPartitions(test *testing.T, container v3io.Container, path string) []int64 { + input := &v3io.GetItemInput{Path: path + "/.schema", + AttributeNames: []string{"*"}} + res, err := container.GetItemSync(input) + if err != nil { + test.Fatal(errors.Wrap(err, "failed to get schema")) + } + output := res.Output.(*v3io.GetItemOutput) + var partitions []int64 + for part := range output.Item { + partitionsStartTime, _ := strconv.ParseInt(part[1:], 10, 64) // parse attribute and discard attribute prefix + partitions = append(partitions, partitionsStartTime) + } + return partitions +} + +func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteParams DeleteParams, + expectedData map[string][]tsdbtest.DataPoint, expectedPartitions []int64) { + + adapter, teardown := tsdbtest.SetUpWithData(test, testParams) + defer teardown() + + container, err := utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HttpTimeout) + if err != nil { + test.Fatalf("failed to create new container. reason: %s", err) + } + + if err := adapter.DeleteDB(deleteParams); err != nil { + test.Fatalf("Failed to delete DB. reason: %s", err) + } + + if !deleteParams.DeleteAll { + actualPartitions := getCurrentPartitions(test, container, testParams.V3ioConfig().TablePath) + assert.ElementsMatch(test, expectedPartitions, actualPartitions, "remaining partitions are not as expected") + + qry, err := adapter.QuerierV2() + if err != nil { + test.Fatalf("Failed to create Querier. reason: %v", err) + } + + params := &pquerier.SelectParams{ + From: 0, + To: math.MaxInt64, + Filter: "1==1", + } + set, err := qry.Select(params) + if err != nil { + test.Fatalf("Failed to run Select. reason: %v", err) + } + + for set.Next() { + series := set.At() + labels := series.Labels() + osLabel := labels.Get("os") + metricName := labels.Get(config.PrometheusMetricNameAttribute) + iter := series.Iterator() + if iter.Err() != nil { + test.Fatalf("Failed to query data series. reason: %v", iter.Err()) + } + + actual, err := iteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + expectedDataKey := metricName + if osLabel != "" { + expectedDataKey = fmt.Sprintf("%v-%v", expectedDataKey, osLabel) + } + + assert.ElementsMatch(test, expectedData[expectedDataKey], actual, + "result data for '%v' didn't match, expected: %v\n actual: %v\n", expectedDataKey, expectedData[expectedDataKey], actual) + + } + if set.Err() != nil { + test.Fatalf("Failed to query metric. 
reason: %v", set.Err()) + } + } else { + container, tablePath := adapter.GetContainer() + tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) + + // Validate: schema does not exist + _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) + } + } + + // Validate: table does not exist + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) + } + } + } +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go index 2ed1b042..4cfc9a6a 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go @@ -13,7 +13,6 @@ import ( "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" - "github.com/v3io/v3io-tsdb/pkg/pquerier" . "github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" "github.com/v3io/v3io-tsdb/pkg/utils" @@ -25,59 +24,12 @@ const DaysInMillis = 24 * HoursInMillis type DataPoint struct { Time int64 - Value interface{} -} - -func (dp DataPoint) Equals(other DataPoint) bool { - if &dp.Time != &other.Time { - return true - } - if dp.Time != other.Time { - return false - } - - switch dpVal := dp.Value.(type) { - case float64: - switch oVal := other.Value.(type) { - case float64: - return dpVal == oVal - case int: - return dpVal == float64(oVal) - default: - return false - } - case int: - switch oVal := other.Value.(type) { - case float64: - return float64(dpVal) == oVal - case int: - return dpVal == oVal - default: - return false - } - case string: - switch oVal := other.Value.(type) { - case string: - return oVal == dpVal - case float64: - soVal := fmt.Sprintf("%f", oVal) - return dpVal == soVal - case int: - soVal := fmt.Sprintf("%d", oVal) - return dpVal == soVal - default: - return false - } - default: - return false - } + Value float64 } - type Metric struct { - Name string - Labels utils.Labels - Data []DataPoint - ExpectedCount *int + Name string + Labels utils.Labels + Data []DataPoint } type TimeSeries []Metric @@ -148,8 +100,7 @@ func DeleteTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { t.Fatalf("Failed to create an adapter. Reason: %s", err) } - now := time.Now().Unix() * 1000 // Current time (now) in milliseconds - if err := adapter.DeleteDB(true, true, 0, now); err != nil { + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { t.Fatalf("Failed to delete a TSDB instance (table) on teardown. 
Reason: %s", err) } } @@ -175,13 +126,7 @@ func tearDown(t testing.TB, v3ioConfig *config.V3ioConfig, testParams TestParams func SetUp(t testing.TB, testParams TestParams) func() { v3ioConfig := testParams.V3ioConfig() - - if overrideTableName, ok := testParams["override_test_name"]; ok { - v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%v", overrideTableName)) - } else { - v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) - } - + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) CreateTestTSDB(t, v3ioConfig) // Measure performance @@ -271,17 +216,12 @@ func ValidateCountOfSamples(t testing.TB, adapter *V3ioAdapter, metricName strin stepSize = queryAggStep } - qry, err := adapter.QuerierV2() + qry, err := adapter.Querier(nil, startTimeMs-stepSize, endTimeMs) if err != nil { t.Fatal(err, "Failed to create a Querier instance.") } - selectParams := &pquerier.SelectParams{From: startTimeMs - stepSize, - To: endTimeMs, - Functions: "count", - Step: stepSize, - Filter: fmt.Sprintf("starts(__name__, '%v')", metricName)} - set, err := qry.Select(selectParams) + set, err := qry.Select("", "count", stepSize, fmt.Sprintf("starts(__name__, '%v')", metricName)) var actualCount int for set.Next() { @@ -322,7 +262,7 @@ func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, star for set.Next() { // Start over for each label set - var lastDataPoint *DataPoint = nil + var lastDataPoint = &DataPoint{Time: -1, Value: -1.0} if set.Err() != nil { t.Fatal(set.Err(), "Failed to get the next element from a result set.") @@ -337,16 +277,12 @@ func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, star currentTime, currentValue := iter.At() currentDataPoint := &DataPoint{Time: currentTime, Value: currentValue} - if lastDataPoint != nil { - switch dataType := lastDataPoint.Value.(type) { - case string, float64, int, int64: - // Note: We cast float to integer to eliminate the risk of a precision error - if !isValid(lastDataPoint, currentDataPoint) { - t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", - metricName, lastDataPoint, currentDataPoint) - } - default: - t.Fatalf("Got value of unsupported data type: %T", dataType) + if lastDataPoint.Value >= 0 { + // Note: We cast float to integer to eliminate the risk of a + // precision error + if !isValid(lastDataPoint, currentDataPoint) { + t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", + metricName, lastDataPoint, currentDataPoint) } } lastDataPoint = currentDataPoint diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go index 760468a0..2a9b79d7 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go @@ -22,18 +22,24 @@ package tsdb import ( "context" + "encoding/base64" "encoding/json" "fmt" "math" pathUtil "path" "path/filepath" + "strconv" + "strings" + "sync" "time" "github.com/nuclio/logger" "github.com/pkg/errors" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-go/pkg/dataplane/http" + "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/appender" + "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" "github.com/v3io/v3io-tsdb/pkg/partmgr" 
"github.com/v3io/v3io-tsdb/pkg/pquerier" @@ -42,7 +48,14 @@ import ( "github.com/v3io/v3io-tsdb/pkg/utils" ) -const defaultHttpTimeout = 30 * time.Second +const ( + defaultHttpTimeout = 30 * time.Second + + errorCodeString = "ErrorCode" + falseConditionOuterErrorCode = "184549378" // todo: change codes + falseConditionInnerErrorCode = "385876025" + maxExpressionsInUpdateItem = 1500 // max is 2000, we're taking a buffer since it doesn't work with 2000 +) type V3ioAdapter struct { startTimeMargin int64 @@ -54,6 +67,15 @@ type V3ioAdapter struct { partitionMngr *partmgr.PartitionManager } +type DeleteParams struct { + Metrics []string + Filter string + From, To int64 + DeleteAll bool + + IgnoreErrors bool +} + func CreateTSDB(cfg *config.V3ioConfig, schema *config.Schema) error { lgr, _ := utils.NewLogger(cfg.LogLevel) @@ -240,59 +262,55 @@ func (a *V3ioAdapter) QuerierV2() (*pquerier.V3ioQuerier, error) { return pquerier.NewV3ioQuerier(a.container, a.logger, a.cfg, a.partitionMngr), nil } -func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64, toTime int64) error { - if deleteAll { +// Delete by time range can optionally specify metrics and filter by labels +func (a *V3ioAdapter) DeleteDB(deleteParams DeleteParams) error { + if deleteParams.DeleteAll { // Ignore time boundaries - fromTime = 0 - toTime = math.MaxInt64 - } - - partitions := a.partitionMngr.PartsForRange(fromTime, toTime, false) - for _, part := range partitions { - a.logger.Info("Deleting partition '%s'.", part.GetTablePath()) - err := utils.DeleteTable(a.logger, a.container, part.GetTablePath(), "", a.cfg.QryWorkers) - if err != nil && !ignoreErrors { - return errors.Wrapf(err, "Failed to delete partition '%s'.", part.GetTablePath()) - } - // Delete the Directory object - err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: part.GetTablePath()}) - if err != nil && !ignoreErrors { - return errors.Wrapf(err, "Failed to delete partition object '%s'.", part.GetTablePath()) + deleteParams.From = 0 + deleteParams.To = math.MaxInt64 + } else { + if deleteParams.To == 0 { + deleteParams.To = time.Now().Unix() * 1000 } } - err := a.partitionMngr.DeletePartitionsFromSchema(partitions) + + // Delete Data + err := a.DeletePartitionsData(&deleteParams) if err != nil { return err } + // If no data is left, delete Names folder if len(a.partitionMngr.GetPartitionsPaths()) == 0 { path := filepath.Join(a.cfg.TablePath, config.NamesDirectory) + "/" // Need a trailing slash a.logger.Info("Delete metric names at path '%s'.", path) err := utils.DeleteTable(a.logger, a.container, path, "", a.cfg.QryWorkers) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { return errors.Wrap(err, "Failed to delete the metric-names table.") } // Delete the Directory object err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) } } } - if deleteAll { + + // If need to 'deleteAll', delete schema + TSDB table folder + if deleteParams.DeleteAll { // Delete Schema file schemaPath := pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName) a.logger.Info("Delete the TSDB configuration at '%s'.", schemaPath) err := a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: schemaPath}) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { return errors.New("The 
configuration at '" + schemaPath + "' cannot be deleted or doesn't exist.") } // Delete the Directory object path := a.cfg.TablePath + "/" err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) - if err != nil && !ignoreErrors { + if err != nil && !deleteParams.IgnoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) } @@ -302,6 +320,457 @@ func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64 return nil } +func (a *V3ioAdapter) DeletePartitionsData(deleteParams *DeleteParams) error { + partitions := a.partitionMngr.PartsForRange(deleteParams.From, deleteParams.To, true) + var entirelyDeletedPartitions []*partmgr.DBPartition + + deleteWholePartition := deleteParams.DeleteAll || (deleteParams.Filter == "" && len(deleteParams.Metrics) == 0) + + fileToDeleteChan := make(chan v3io.Item, 1024) + getItemsTerminationChan := make(chan error, len(partitions)) + deleteTerminationChan := make(chan error, a.cfg.Workers) + numOfGetItemsRoutines := len(partitions) + if len(deleteParams.Metrics) > 0 { + numOfGetItemsRoutines = numOfGetItemsRoutines * len(deleteParams.Metrics) + } + goRoutinesNum := numOfGetItemsRoutines + a.cfg.Workers + onErrorTerminationChannel := make(chan struct{}, goRoutinesNum) + systemAttributesToFetch := []string{config.ObjectNameAttrName, config.MtimeSecsAttributeName, config.MtimeNSecsAttributeName, config.EncodingAttrName, config.MaxTimeAttrName} + var getItemsWorkers, getItemsTerminated, deletesTerminated int + + var getItemsWG sync.WaitGroup + getItemsErrorChan := make(chan error, numOfGetItemsRoutines) + + aggregates := a.GetSchema().PartitionSchemaInfo.Aggregates + hasServerSideAggregations := len(aggregates) != 1 || aggregates[0] != "" + + var aggrMask aggregate.AggrType + var err error + if hasServerSideAggregations { + aggrMask, _, err = aggregate.AggregatesFromStringListWithCount(aggregates) + if err != nil { + return err + } + } + + for i := 0; i <= a.cfg.Workers; i++ { + go deleteObjectWorker(a.container, deleteParams, a.logger, + fileToDeleteChan, deleteTerminationChan, onErrorTerminationChannel, + aggrMask) + } + + for _, part := range partitions { + partitionEntirelyInRange := deleteParams.From <= part.GetStartTime() && deleteParams.To >= part.GetEndTime() + deleteEntirePartitionFolder := partitionEntirelyInRange && deleteWholePartition + + // Delete all files in partition folder and then delete the folder itself + if deleteEntirePartitionFolder { + a.logger.Info("Deleting entire partition '%s'.", part.GetTablePath()) + + getItemsWG.Add(1) + go deleteEntirePartition(a.logger, a.container, part.GetTablePath(), a.cfg.QryWorkers, + &getItemsWG, getItemsErrorChan) + + entirelyDeletedPartitions = append(entirelyDeletedPartitions, part) + // First get all items based on filter+metric+time range, then delete what is necessary + } else { + a.logger.Info("Deleting partial partition '%s'.", part.GetTablePath()) + + start, end := deleteParams.From, deleteParams.To + + // Round the start and end times to the nearest aggregation buckets, so that server-side aggregations can be recalculated later + if hasServerSideAggregations { + start = part.GetAggregationBucketStartTime(part.Time2Bucket(deleteParams.From)) + end = part.GetAggregationBucketEndTime(part.Time2Bucket(deleteParams.To)) + } + + var chunkAttributesToFetch []string + + // If we don't want to delete the entire object, also fetch the specific chunks to delete. 
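+ // As an illustration only (the attribute names and bucket numbers below are assumptions for the example, inferred from the "_v" prefix convention used by deleteObjectWorker, not captured output):
+ //   attrs, _ := part.Range2Attrs("v", start, end)
+ //   // attrs might look like []string{"_v1", "_v2"} -- the chunk-attribute
+ //   // names whose buckets overlap [start, end]; only these are fetched and rewritten.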
+ if !partitionEntirelyInRange { + chunkAttributesToFetch, _ = part.Range2Attrs("v", start, end) + } + + allAttributes := append(chunkAttributesToFetch, systemAttributesToFetch...) + if len(deleteParams.Metrics) == 0 { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } else { + for _, metric := range deleteParams.Metrics { + for _, shardingKey := range part.GetShardingKeys(metric) { + getItemsWorkers++ + input := &v3io.GetItemsInput{Path: part.GetTablePath(), + AttributeNames: allAttributes, + Filter: deleteParams.Filter, + ShardingKey: shardingKey} + go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) + } + } + } + } + } + a.logger.Debug("issued %v getItems", getItemsWorkers) + + // Wait for deletion of entire partitions to complete + getItemsWG.Wait() + select { + case err = <-getItemsErrorChan: + fmt.Println("got error", err) + // Signal all other goroutines to quit + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return err + default: + } + + if getItemsWorkers != 0 { + for deletesTerminated < a.cfg.Workers { + select { + case err := <-getItemsTerminationChan: + a.logger.Debug("finished getItems worker, total finished: %v, error: %v", getItemsTerminated+1, err) + if err != nil { + // If requested to ignore non-existing tables, do not return an error. + if !(deleteParams.IgnoreErrors && utils.IsNotExistsOrConflictError(err)) { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrapf(err, "GetItems failed during recursive delete.") + } + } + getItemsTerminated++ + + if getItemsTerminated == getItemsWorkers { + close(fileToDeleteChan) + } + case err := <-deleteTerminationChan: + a.logger.Debug("finished delete worker, total finished: %v, err: %v", deletesTerminated+1, err) + if err != nil { + for i := 0; i < goRoutinesNum; i++ { + onErrorTerminationChannel <- struct{}{} + } + return errors.Wrapf(err, "Delete failed during recursive delete.") + } + deletesTerminated++ + } + } + } else { + close(fileToDeleteChan) + } + + a.logger.Debug("finished deleting data, removing partitions from schema") + err = a.partitionMngr.DeletePartitionsFromSchema(entirelyDeletedPartitions) + if err != nil { + return err + } + + return nil +} + +func deleteEntirePartition(logger logger.Logger, container v3io.Container, partitionPath string, workers int, + wg *sync.WaitGroup, errChannel chan<- error) { + defer wg.Done() + + err := utils.DeleteTable(logger, container, partitionPath, "", workers) + if err != nil { + errChannel <- errors.Wrapf(err, "Failed to delete partition '%s'.", partitionPath) + } + // Delete the Directory object + err = container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: partitionPath}) + if err != nil { + errChannel <- errors.Wrapf(err, "Failed to delete partition folder '%s'.", partitionPath) + } +} + +func getItemsWorker(logger logger.Logger, container v3io.Container, input *v3io.GetItemsInput, partition *partmgr.DBPartition, + filesToDeleteChan chan<- v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}) { + for { + select { + case <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + logger.Debug("going to getItems for partition '%v', input: %v", 
partition.GetTablePath(), *input) + resp, err := container.GetItemsSync(input) + if err != nil { + terminationChan <- err + return + } + resp.Release() + output := resp.Output.(*v3io.GetItemsOutput) + + for _, item := range output.Items { + item["partition"] = partition + + // In case we got an error on delete while iterating the getItems response + select { + case <-onErrorTerminationChannel: + terminationChan <- nil + return + default: + } + + filesToDeleteChan <- item + } + if output.Last { + terminationChan <- nil + return + } + input.Marker = output.NextMarker + } +} + +func deleteObjectWorker(container v3io.Container, deleteParams *DeleteParams, logger logger.Logger, + filesToDeleteChannel <-chan v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}, + aggrMask aggregate.AggrType) { + for { + select { + case <-onErrorTerminationChannel: + return + case itemToDelete, ok := <-filesToDeleteChannel: + if !ok { + terminationChan <- nil + return + } + + currentPartition := itemToDelete.GetField("partition").(*partmgr.DBPartition) + fileName, err := itemToDelete.GetFieldString(config.ObjectNameAttrName) + if err != nil { + terminationChan <- err + return + } + fullFileName := pathUtil.Join(currentPartition.GetTablePath(), fileName) + + // Delete whole object + if deleteParams.From <= currentPartition.GetStartTime() && + deleteParams.To >= currentPartition.GetEndTime() { + + logger.Debug("delete entire item '%v' ", fullFileName) + input := &v3io.DeleteObjectInput{Path: fullFileName} + err = container.DeleteObjectSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + terminationChan <- err + return + } + // Delete partial object - specific chunks or sub-parts of chunks + } else { + mtimeSecs, err := itemToDelete.GetFieldInt(config.MtimeSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + mtimeNSecs, err := itemToDelete.GetFieldInt(config.MtimeNSecsAttributeName) + if err != nil { + terminationChan <- err + return + } + + deleteUpdateExpression := strings.Builder{} + dataEncoding, err := getEncoding(itemToDelete) + if err != nil { + terminationChan <- err + return + } + + var aggregationsByBucket map[int]*aggregate.AggregatesList + if aggrMask != 0 { + aggregationsByBucket = make(map[int]*aggregate.AggregatesList) + aggrBuckets := currentPartition.Times2BucketRange(deleteParams.From, deleteParams.To) + for _, bucketID := range aggrBuckets { + aggregationsByBucket[bucketID] = aggregate.NewAggregatesList(aggrMask) + } + } + + var newMaxTime int64 = math.MaxInt64 + var numberOfExpressionsInUpdate int + for attributeName, value := range itemToDelete { + if strings.HasPrefix(attributeName, "_v") { + // Check whether the whole chunk attribute needs to be deleted or just part of it. 
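+ // To make the two branches below concrete, the update expression takes one of two shapes (the attribute name "_v12" is a hypothetical example):
+ //   delete(_v12);                        -- the chunk lies entirely inside the delete range
+ //   _v12=blob('<base64 of chunk bytes>'); -- partial overlap: the chunk is re-encoded
+ //                                            without the deleted samples (see generatePartialChunkDeleteExpression)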
+ if currentPartition.IsChunkInRangeByAttr(attributeName, deleteParams.From, deleteParams.To) { + deleteUpdateExpression.WriteString("delete(") + deleteUpdateExpression.WriteString(attributeName) + deleteUpdateExpression.WriteString(");") + } else { + currentChunksMaxTime, err := generatePartialChunkDeleteExpression(logger, &deleteUpdateExpression, attributeName, + value.([]byte), dataEncoding, deleteParams, currentPartition, aggregationsByBucket) + if err != nil { + terminationChan <- err + return + } + + // We want to save the earliest max time possible + if currentChunksMaxTime < newMaxTime { + newMaxTime = currentChunksMaxTime + } + } + numberOfExpressionsInUpdate++ + } + } + + dbMaxTime := int64(itemToDelete.GetField(config.MaxTimeAttrName).(int)) + + // Update the partition's max time if needed. + if deleteParams.From < dbMaxTime && deleteParams.To >= dbMaxTime { + if deleteParams.From < newMaxTime { + newMaxTime = deleteParams.From + } + + deleteUpdateExpression.WriteString(fmt.Sprintf("%v=%v;", config.MaxTimeAttrName, newMaxTime)) + } + + if deleteUpdateExpression.Len() > 0 { + // If there are server aggregates, update the needed buckets + if aggrMask != 0 { + for bucket, aggregations := range aggregationsByBucket { + numberOfExpressionsInUpdate = numberOfExpressionsInUpdate + len(*aggregations) + + // Due to an engine limitation, if we reach the maximum number of expressions in an UpdateItem + // we need to break the update into chunks + // TODO: refactor in 2.8: + // in 2.8 there is a better way of doing it by uniting multiple update expressions into + // one expression by range in a form similar to `_v_sum[15...100]=0` + if numberOfExpressionsInUpdate < maxExpressionsInUpdateItem { + deleteUpdateExpression.WriteString(aggregations.SetExpr("v", bucket)) + } else { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + mtimeSecs, mtimeNSecs, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + + // Reset state for the next update iteration + numberOfExpressionsInUpdate = 0 + deleteUpdateExpression.Reset() + } + } + } + + // If any expressions are left, save them + if deleteUpdateExpression.Len() > 0 { + exprStr := deleteUpdateExpression.String() + logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) + _, _, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) + if err != nil { + terminationChan <- err + return + } + } + } + } + } + } +} + +func sendUpdateItem(path, expr string, mtimeSecs, mtimeNSecs int, container v3io.Container) (int, int, error) { + condition := fmt.Sprintf("%v == %v and %v == %v", + config.MtimeSecsAttributeName, mtimeSecs, + config.MtimeNSecsAttributeName, mtimeNSecs) + + input := &v3io.UpdateItemInput{Path: path, + Expression: &expr, + Condition: condition} + + response, err := container.UpdateItemSync(input) + if err != nil && !utils.IsNotExistsOrConflictError(err) { + returnError := err + if isFalseConditionError(err) { + returnError = errors.Wrapf(err, "Item '%v' was updated while deleting occurred. 
Please disable any ingestion and retry.", path) + } + return 0, 0, returnError + } + + output := response.Output.(*v3io.UpdateItemOutput) + return output.MtimeSecs, output.MtimeNSecs, nil +} + +func getEncoding(itemToDelete v3io.Item) (chunkenc.Encoding, error) { + var encoding chunkenc.Encoding + encodingStr, ok := itemToDelete.GetField(config.EncodingAttrName).(string) + // If we don't have the encoding attribute, use XOR as the default (for backwards compatibility). + if !ok { + encoding = chunkenc.EncXOR + } else { + intEncoding, err := strconv.Atoi(encodingStr) + if err != nil { + return 0, fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) + } else { + encoding = chunkenc.Encoding(intEncoding) + } + } + + return encoding, nil +} + +func generatePartialChunkDeleteExpression(logger logger.Logger, expr *strings.Builder, + attributeName string, value []byte, encoding chunkenc.Encoding, deleteParams *DeleteParams, + partition *partmgr.DBPartition, aggregationsByBucket map[int]*aggregate.AggregatesList) (int64, error) { + chunk, err := chunkenc.FromData(logger, encoding, value, 0) + if err != nil { + return 0, err + } + + newChunk := chunkenc.NewChunk(logger, encoding == chunkenc.EncVariant) + appender, err := newChunk.Appender() + if err != nil { + return 0, err + } + + var currentMaxTime int64 + var remainingItemsCount int + iter := chunk.Iterator() + for iter.Next() { + var t int64 + var v interface{} + if encoding == chunkenc.EncXOR { + t, v = iter.At() + } else { + t, v = iter.AtString() + } + + // Append back only events that are not in the delete range + if t < deleteParams.From || t > deleteParams.To { + remainingItemsCount++ + appender.Append(t, v) + + // Calculate server-side aggregations + if aggregationsByBucket != nil { + currentAgg, ok := aggregationsByBucket[partition.Time2Bucket(t)] + // A chunk may contain more data than needed for the aggregations; if this is the case, do not aggregate + if ok { + currentAgg.Aggregate(t, v) + } + } + + // Update current chunk's new max time + if t > currentMaxTime { + currentMaxTime = t + } + } + } + + if remainingItemsCount == 0 { + expr.WriteString("delete(") + expr.WriteString(attributeName) + expr.WriteString(");") + currentMaxTime, _ = partition.GetChunkStartTimeByAttr(attributeName) + } else { + bytes := appender.Chunk().Bytes() + val := base64.StdEncoding.EncodeToString(bytes) + + expr.WriteString(fmt.Sprintf("%s=blob('%s'); ", attributeName, val)) + } + + return currentMaxTime, nil + +} + // Return the number of items in a TSDB table func (a *V3ioAdapter) CountMetrics(part string) (int, error) { count := 0 @@ -360,3 +829,16 @@ type Appender interface { Rollback() error Close() } + +// Check if the current error was caused specifically because the condition was evaluated to false. 
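+// As a rough sketch (the exact layout of the error string is an assumption; the check below only requires two occurrences of "ErrorCode" containing both condition codes), a matching error reads something like:
+//   "... ErrorCode: 184549378 ... ErrorCode: 385876025 ..."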
+func isFalseConditionError(err error) bool { + errString := err.Error() + + if strings.Count(errString, errorCodeString) == 2 && + strings.Contains(errString, falseConditionOuterErrorCode) && + strings.Contains(errString, falseConditionInnerErrorCode) { + return true + } + + return false +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go index db244d2f..18aac56d 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go @@ -24,20 +24,14 @@ package tsdb_test import ( "encoding/json" - "fmt" - "math" - "path" "sort" - "strings" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" - "github.com/v3io/v3io-tsdb/pkg/partmgr" . "github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" @@ -47,7 +41,6 @@ import ( const defaultStepMs = 5 * tsdbtest.MinuteInMillis // 5 minutes func TestIngestData(t *testing.T) { - timestamp := fmt.Sprintf("%d", time.Now().Unix()) //time.Now().Format(time.RFC3339) testCases := []struct { desc string params tsdbtest.TestParams @@ -105,46 +98,6 @@ func TestIngestData(t *testing.T) { }}}, ), }, - {desc: "Should drop values of incompatible data types (prepare data for: IG-13146)", - params: tsdbtest.NewTestParams(t, - tsdbtest.TestOption{ - Key: tsdbtest.OptTimeSeries, - Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "IG13146", - Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), - Data: []tsdbtest.DataPoint{ - {Time: 15, Value: 0.1}, // first add float value - {Time: 20, Value: "some string value"}, // then attempt to add string value - {Time: 30, Value: 0.2}, // and finally add another float value - }, - ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), - }}}, - tsdbtest.TestOption{ - Key: "override_test_name", - Value: fmt.Sprintf("IG-13146-%s", timestamp)}), - }, - {desc: "IG-13146: Should reject values of incompatible data types without data corruption", - params: tsdbtest.NewTestParams(t, - tsdbtest.TestOption{ - Key: tsdbtest.OptTimeSeries, - Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "IG13146", - Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), - Data: []tsdbtest.DataPoint{ - {Time: 50, Value: "another string value"}, // then attempt to add string value - {Time: 60, Value: 0.4}, // valid values from this batch will be dropped - {Time: 70, Value: 0.3}, // because processing of entire batch will stop - }, - ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), - }}}, - tsdbtest.TestOption{ - Key: "override_test_name", - Value: fmt.Sprintf("IG-13146-%s", timestamp)}, - tsdbtest.TestOption{ - Key: "expected_error_contains_string", - // Note, the expected error message should align with pkg/appender/ingest.go:308 - Value: "trying to ingest values of incompatible data type"}), - }, } for _, test := range testCases { @@ -187,26 +140,13 @@ func testIngestDataCase(t *testing.T, testParams tsdbtest.TestParams) { } if _, err := appender.WaitForCompletion(0); err != nil { - if !isExpected(testParams, err) { - t.Fatalf("Failed to wait for appender 
completion. reason: %s", err) - } + t.Fatalf("Failed to wait for appender completion. reason: %s", err) } - expectedCount := len(dp.Data) - if dp.ExpectedCount != nil { - expectedCount = *dp.ExpectedCount - } - tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, expectedCount, from, to, -1) + tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, len(dp.Data), from, to, -1) } } -func isExpected(testParams tsdbtest.TestParams, actualErr error) bool { - if errMsg, ok := testParams["expected_error_contains_string"]; ok { - return strings.Contains(actualErr.Error(), fmt.Sprintf("%v", errMsg)) - } - return false -} - func TestIngestDataWithSameTimestamp(t *testing.T) { baseTime := int64(1532209200000) testParams := tsdbtest.NewTestParams(t, @@ -615,18 +555,8 @@ func testQueryDataCase(test *testing.T, testParams tsdbtest.TestParams, filter s if err != nil { test.Fatal(err) } - - for _, data := range expected[currentAggregate] { - var equalCount = 0 - for _, dp := range actual { - if dp.Equals(data) { - equalCount++ - continue - } - } - assert.Equal(test, equalCount, len(expected[currentAggregate]), - "Check failed for aggregate='%s'. Query aggregates: %s", currentAggregate, queryAggregates) - } + assert.ElementsMatch(test, expected[currentAggregate], actual, + "Check failed for aggregate='%s'. Query aggregates: %s", currentAggregate, queryAggregates) } if set.Err() != nil { @@ -752,14 +682,7 @@ func testQueryDataOverlappingWindowCase(test *testing.T, v3ioConfig *config.V3io } assert.EqualValues(test, len(windows), len(actual)) for _, data := range expected[agg] { - var equalCount = 0 - for _, dp := range actual { - if dp.Equals(data) { - equalCount++ - continue - } - } - assert.Equal(test, equalCount, len(expected[agg])) + assert.Contains(test, actual, data) } } @@ -840,17 +763,7 @@ func TestIgnoreNaNWhenSeekingAggSeries(t *testing.T) { } actual = append(actual, tsdbtest.DataPoint{Time: t1, Value: v1}) } - - for _, data := range expected[agg] { - var equalCount = 0 - for _, dp := range actual { - if dp.Equals(data) { - equalCount++ - continue - } - } - assert.Equal(t, equalCount, len(expected[agg])) - } + assert.ElementsMatch(t, expected[agg], actual) } if set.Err() != nil { @@ -923,8 +836,7 @@ func TestDeleteTSDB(t *testing.T) { t.Fatal(res.Error.Error()) } - now := time.Now().Unix() * 1000 // now time in millis - if err := adapter.DeleteDB(true, true, 0, now); err != nil { + if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { t.Fatalf("Failed to delete DB on teardown. 
reason: %s", err) } @@ -937,269 +849,6 @@ func TestDeleteTSDB(t *testing.T) { } } -func TestDeleteTable(t *testing.T) { - ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") - t1 := ta.Unix() * 1000 - tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") - t2 := tb.Unix() * 1000 - tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") - t3 := tc.Unix() * 1000 - td, _ := time.Parse(time.RFC3339, "now + 1w") - futurePoint := td.Unix() * 1000 - - testCases := []struct { - desc string - deleteFrom int64 - deleteTo int64 - deleteAll bool - ignoreErrors bool - data []tsdbtest.DataPoint - expected []tsdbtest.DataPoint - ignoreReason string - }{ - {desc: "Should delete all table by time", - deleteFrom: 0, - deleteTo: 9999999999999, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{}, - }, - {desc: "Should delete all table by deleteAll", - deleteFrom: 0, - deleteTo: 0, - deleteAll: true, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}, - {Time: futurePoint, Value: 555.5}}, - expected: []tsdbtest.DataPoint{}, - }, - {desc: "Should skip partial partition at begining", - deleteFrom: t1 - 10000, - deleteTo: 9999999999999, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}}, - }, - {desc: "Should skip partial partition at end", - deleteFrom: 0, - deleteTo: t3 + 10000, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t3, Value: 444.4}}, - }, - {desc: "Should skip partial partition at beginning and end not in range", - deleteFrom: t1 + 10000, - deleteTo: t3 - 10000, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t3, Value: 444.4}}, - }, - {desc: "Should skip partial partition at beginning and end although in range", - deleteFrom: t1 - 10000, - deleteTo: t3 + 10000, - deleteAll: false, - ignoreErrors: true, - data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}}, - expected: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t3, Value: 444.4}}, - }, - } - - for _, test := range testCases { - t.Run(test.desc, func(t *testing.T) { - if test.ignoreReason != "" { - t.Skip(test.ignoreReason) - } - testDeleteTSDBCase(t, - tsdbtest.NewTestParams(t, - tsdbtest.TestOption{ - Key: tsdbtest.OptDropTableOnTearDown, - Value: !test.deleteAll}, - tsdbtest.TestOption{ - Key: tsdbtest.OptTimeSeries, - Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "metricToDelete", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: test.data, - }}}, - ), - test.deleteFrom, test.deleteTo, test.ignoreErrors, test.deleteAll, test.expected) - }) - } -} - -func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteFrom int64, deleteTo int64, ignoreErrors bool, deleteAll bool, - expected []tsdbtest.DataPoint) { - - adapter, teardown := tsdbtest.SetUpWithData(test, testParams) - defer teardown() - - container, err := 
utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HttpTimeout) - if err != nil { - test.Fatalf("failed to create new container. reason: %s", err) - } - pm, err := partmgr.NewPartitionMngr(adapter.GetSchema(), container, testParams.V3ioConfig()) - if err != nil { - test.Fatalf("Failed to create new partition manager. reason: %s", err) - } - - initiaPartitions := pm.PartsForRange(0, math.MaxInt64, true) - initialNumberOfPartitions := len(initiaPartitions) - - partitionsToDelete := pm.PartsForRange(deleteFrom, deleteTo, false) - - if err := adapter.DeleteDB(deleteAll, ignoreErrors, deleteFrom, deleteTo); err != nil { - test.Fatalf("Failed to delete DB. reason: %s", err) - } - - if !deleteAll { - pm1, err := partmgr.NewPartitionMngr(adapter.GetSchema(), container, testParams.V3ioConfig()) - remainingParts := pm1.PartsForRange(0, math.MaxInt64, false) - assert.Equal(test, len(remainingParts), initialNumberOfPartitions-len(partitionsToDelete)) - - qry, err := adapter.Querier(nil, 0, math.MaxInt64) - if err != nil { - test.Fatalf("Failed to create Querier. reason: %v", err) - } - - for _, metric := range testParams.TimeSeries() { - set, err := qry.Select(metric.Name, "", 0, "") - if err != nil { - test.Fatalf("Failed to run Select. reason: %v", err) - } - - set.Next() - if set.Err() != nil { - test.Fatalf("Failed to query metric. reason: %v", set.Err()) - } - - series := set.At() - if series == nil && len(expected) == 0 { - //table is expected to be empty - } else if series != nil { - iter := series.Iterator() - if iter.Err() != nil { - test.Fatalf("Failed to query data series. reason: %v", iter.Err()) - } - - actual, err := iteratorToSlice(iter) - if err != nil { - test.Fatal(err) - } - assert.ElementsMatch(test, expected, actual) - } else { - test.Fatalf("Result series is empty while expected result set is not!") - } - } - } else { - container, tablePath := adapter.GetContainer() - tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) - - // Validate: schema does not exist - _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) - if err != nil { - if utils.IsNotExistsError(err) { - // OK - expected - } else { - test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) - } - } - - // Validate: table does not exist - _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) - if err != nil { - if utils.IsNotExistsError(err) { - // OK - expected - } else { - test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) - } - } - } -} - -func TestIngestDataFloatThenString(t *testing.T) { - testParams := tsdbtest.NewTestParams(t) - - defer tsdbtest.SetUp(t, testParams)() - - adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) - if err != nil { - t.Fatalf("Failed to create v3io adapter. reason: %s", err) - } - - appender, err := adapter.Appender() - if err != nil { - t.Fatalf("Failed to get appender. reason: %s", err) - } - - labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} - _, err = appender.Add(labels, 1532940510000, 12.0) - if err != nil { - t.Fatalf("Failed to add data to appender. reason: %s", err) - } - - _, err = appender.Add(labels, 1532940610000, "tal") - if err == nil { - t.Fatal("expected failure but finished successfully") - } - - if _, err := appender.WaitForCompletion(0); err != nil { - t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) - } - - tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) -} - -func TestIngestDataStringThenFloat(t *testing.T) { - testParams := tsdbtest.NewTestParams(t) - - defer tsdbtest.SetUp(t, testParams)() - - adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) - if err != nil { - t.Fatalf("Failed to create v3io adapter. reason: %s", err) - } - - appender, err := adapter.Appender() - if err != nil { - t.Fatalf("Failed to get appender. reason: %s", err) - } - - labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} - _, err = appender.Add(labels, 1532940510000, "tal") - if err != nil { - t.Fatalf("Failed to add data to appender. reason: %s", err) - } - - _, err = appender.Add(labels, 1532940610000, 666.0) - if err == nil { - t.Fatal("expected failure but finished successfully") - } - - if _, err := appender.WaitForCompletion(0); err != nil { - t.Fatalf("Failed to wait for appender completion. reason: %s", err) - } - - tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) -} - func iteratorToSlice(it chunkenc.Iterator) ([]tsdbtest.DataPoint, error) { var result []tsdbtest.DataPoint for it.Next() { diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go index dbda9752..9d33c58a 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go @@ -30,17 +30,21 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/utils" ) type delCommandeer struct { cmd *cobra.Command rootCommandeer *RootCommandeer - deleteAll bool - ignoreErrors bool force bool - fromTime string - toTime string + + deleteAll bool + ignoreErrors bool + fromTime string + toTime string + filter string + metrics string } func newDeleteCommandeer(rootCommandeer *RootCommandeer) *delCommandeer { @@ -66,6 +70,9 @@ Notes: metric items with older or newer times. Use the info command to view the partitioning interval.`, RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + return errors.New("delete does not accept unnamed arguments. Did you forget to use a flag?") + } // Initialize parameters return commandeer.delete() }, @@ -81,6 +88,10 @@ Notes: "End (maximum) time for the delete operation, as a string containing an\nRFC 3339 time string, a Unix timestamp in milliseconds, or a relative\ntime of the format \"now\" or \"now-[0-9]+[mhd]\" (where 'm' = minutes,\n'h' = hours, and 'd' = days). Examples: \"2018-09-26T14:10:20Z\";\n\"1537971006000\"; \"now-3h\"; \"now-7d\". (default \"now\")") cmd.Flags().StringVarP(&commandeer.fromTime, "begin", "b", "", "Start (minimum) time for the delete operation, as a string containing\nan RFC 3339 time, a Unix timestamp in milliseconds, a relative time of\nthe format \"now\" or \"now-[0-9]+[mhd]\" (where 'm' = minutes, 'h' = hours,\nand 'd' = days), or 0 for the earliest time. Examples:\n\"2016-01-02T15:34:26Z\"; \"1451748866\"; \"now-90m\"; \"0\". (default =\n - 1h)") + cmd.Flags().StringVar(&commandeer.filter, "filter", "", + "Query filter, as an Iguazio Data Science Platform\nfilter expression. \nExamples: \"method=='get'\"; \"method=='get' AND os=='win'\".") + cmd.Flags().StringVarP(&commandeer.metrics, "metrics", "m", "", + "Comma-separated list of metric names to delete. 
If you don't set this argument, all metrics will be deleted according to the time range and filter specified.") commandeer.cmd = cmd return commandeer @@ -128,7 +139,20 @@ func (dc *delCommandeer) delete() error { } } - err = dc.rootCommandeer.adapter.DeleteDB(dc.deleteAll, dc.ignoreErrors, from, to) + var metricsToDelete []string + if dc.metrics != "" { + for _, m := range strings.Split(dc.metrics, ",") { + metricsToDelete = append(metricsToDelete, strings.TrimSpace(m)) + } + } + + params := tsdb.DeleteParams{DeleteAll: dc.deleteAll, + IgnoreErrors: dc.ignoreErrors, + From: from, + To: to, + Metrics: metricsToDelete, + Filter: dc.filter} + err = dc.rootCommandeer.adapter.DeleteDB(params) if err != nil { return errors.Wrapf(err, "Failed to delete %s TSDB table '%s' in container '%s'.", partialMsg, dc.rootCommandeer.v3iocfg.TablePath, dc.rootCommandeer.v3iocfg.Container) } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go index e7f32bbb..cf34e8db 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go @@ -37,21 +37,16 @@ func IsNotExistsError(err error) bool { return false } -const ( - errorCodeString = "ErrorCode" - falseConditionOuterErrorCode = "16777244" - falseConditionInnerErrorCode = "16777245" -) - -// Check if the current error was caused specifically because the condition was evaluated to false. -func IsFalseConditionError(err error) bool { - errString := err.Error() - - if strings.Count(errString, errorCodeString) == 2 && - strings.Contains(errString, falseConditionOuterErrorCode) && - strings.Contains(errString, falseConditionInnerErrorCode) { +func IsNotExistsOrConflictError(err error) bool { + errorWithStatusCode, ok := err.(v3ioerrors.ErrorWithStatusCode) + if !ok { + // error of different type + return false + } + statusCode := errorWithStatusCode.StatusCode() + // Ignore 404s and 409s + if statusCode == http.StatusNotFound || statusCode == http.StatusConflict { return true } - return false } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go index 694b5b2a..dc0fa165 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go @@ -157,19 +157,7 @@ func BenchmarkIngest(b *testing.B) { } func isValidDataPoint(prev, current *tsdbtest.DataPoint) bool { - if current.Time > prev.Time { - switch cv := current.Value.(type) { - case float64: - if pv, ok := prev.Value.(float64); ok { - return int64(cv)-int64(pv) == 1 - } - case string: - return true - default: - return false - } - } - return false + return int64(current.Value)-int64(prev.Value) == 1 && current.Time > prev.Time } func runTest( diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go deleted file mode 100644 index dc70312e..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/errors.go +++ /dev/null @@ -1,285 +0,0 @@ -// Package errors provides an api similar to github.com/nuclio/nuclio/pkg/errors -// However we don't carry stack trace around for performance -// (see 
https://github.com/pkg/errors/issues/124) -package errors - -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported -// -// %s print the error -// %+v extended format. Will print stack trace of errors - -import ( - "bytes" - "fmt" - "io" - "os" - "runtime" - "strings" -) - -var ( - // ShowLineInfo sets whether we collect location information (file, line) - // (getting location information makes creating an error slower: ~550ns vs 2ns) - ShowLineInfo bool -) - -// Error implements the error interface with a call stack -type Error struct { - message string - cause error - fileName string - lineNumber int -} - -func init() { - ShowLineInfo = len(os.Getenv("NUCLIO_NO_ERROR_LINE_INFO")) == 0 -} - -// caller returns the caller information (file, line) -// Note this is sensitive to where it's called -func caller() (string, int) { - pcs := make([]uintptr, 1) - // skip 3 levels to get to the caller - n := runtime.Callers(3, pcs) - if n == 0 { - return "", 0 - } - - pc := pcs[0] - 1 - fn := runtime.FuncForPC(pc) - if fn == nil { - return "", 0 - } - - return fn.FileLine(pc) -} - -// New returns a new error -func New(message string) error { - err := &Error{message: message} - if ShowLineInfo { - err.fileName, err.lineNumber = caller() - } - return err -} - -// Errorf returns a new Error -func Errorf(format string, args ...interface{}) error { - err := &Error{message: fmt.Sprintf(format, args...)} - if ShowLineInfo { - err.fileName, err.lineNumber = caller() - } - return err -} - -// Wrap returns a new error with err as its cause; if err is nil, it returns nil -func Wrap(err error, message string) error { - if err == nil { - return nil - } - - errObj := &Error{ - message: message, - cause: err, - } - - if ShowLineInfo { - errObj.fileName, errObj.lineNumber = caller() - } - return errObj -} - -// Wrapf returns a new error with err as its cause; if err is nil, it returns nil -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - - message := fmt.Sprintf(format, args...)
- errObj := &Error{ - message: message, - cause: err, - } - if ShowLineInfo { - errObj.fileName, errObj.lineNumber = caller() - } - return errObj -} - -// Error is the string representation of the error -func (err *Error) Error() string { - return err.message -} - -// Cause returns the cause of the error -func (err *Error) Cause() error { - return err.cause -} - -func asError(err error) *Error { - errObj, ok := err.(*Error) - if !ok { - return nil - } - return errObj -} - -// LineInfo returns the location (file, line) where the error was created -func (err *Error) LineInfo() (string, int) { - return err.fileName, err.lineNumber -} - -// reverse reverses a slice in place -func reverse(slice []error) { - for left, right := 0, len(slice)-1; left < right; left, right = left+1, right-1 { - slice[left], slice[right] = slice[right], slice[left] - } -} - -// GetErrorStack returns the stack of messages (oldest on top) -// if depth <= 0, the whole stack is returned -func GetErrorStack(err error, depth int) []error { - errors := []error{err} - - errObj := asError(err) - if errObj == nil { - return errors - } - - for errObj = asError(errObj.cause); errObj != nil; errObj = asError(errObj.cause) { - errors = append(errors, errObj) - } - - reverse(errors) - if depth > 0 { - if depth > len(errors) { - depth = len(errors) - } - errors = errors[:depth] - } - return errors -} - -// GetErrorStackString returns the error stack as a string -func GetErrorStackString(err error, depth int) string { - buffer := bytes.Buffer{} - - PrintErrorStack(&buffer, err, depth) - - return buffer.String() -} - -// PrintErrorStack prints the error stack into out, up to depth levels -// If depth <= 0, it prints the whole stack -func PrintErrorStack(out io.Writer, err error, depth int) { - if err == nil { - return - } - - pathLen := 40 - - stack := GetErrorStack(err, depth) - errObj := asError(stack[0]) - - if errObj != nil && errObj.lineNumber != 0 { - cause := errObj.Error() - if errObj.cause != nil { - cause = errObj.cause.Error() - } - - fmt.Fprintf(out, "\nError - %s", cause) // nolint: errcheck - fmt.Fprintf(out, "\n %s:%d\n", trimPath(errObj.fileName, pathLen), errObj.lineNumber) // nolint: errcheck - } else { - fmt.Fprintf(out, "\nError - %s", stack[0].Error()) // nolint: errcheck - } - - fmt.Fprintf(out, "\nCall stack:") // nolint: errcheck - - for _, e := range stack { - errObj := asError(e) - fmt.Fprintf(out, "\n%s", e.Error()) // nolint: errcheck - if errObj != nil && errObj.lineNumber != 0 { - fmt.Fprintf(out, "\n %s:%d", trimPath(errObj.fileName, pathLen), errObj.lineNumber) // nolint: errcheck - } - } - - out.Write([]byte{'\n'}) // nolint: errcheck -} - -// Cause returns the cause of the error -func Cause(err error) error { - var cause error - - if err == nil { - return nil - } - - errAsError := asError(err) - if errAsError != nil { - cause = errAsError.cause - } - - // treat the err as simply an error - if cause == nil { - cause = err - } - - return cause -} - -// RootCause returns the root cause of the error -func RootCause(err error) error { - currentErr := err - for { - cause := Cause(currentErr) - - // if there's a cause go deeper - if cause == nil || cause == currentErr { - break - } - - currentErr = cause - } - - return currentErr -} - -// sumLengths returns the sum of the lengths of the strings -func sumLengths(parts []string) int { - total := 0 - for _, s := range parts { - total += len(s) - } - return total -} - -// trimPath shortens fileName to be at most size characters -func trimPath(fileName string, size int) string { - if len(fileName) <=
size { - return fileName - } - - // We'd like to cut at directory boundary - parts := strings.Split(fileName, "/") - for sumLengths(parts) > size && len(parts) > 1 { - parts = parts[1:] - } - - return ".../" + strings.Join(parts, "/") -} - -// Format formats an error -func (err *Error) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - PrintErrorStack(s, err, -1) - } - fallthrough - case 's': - fmt.Fprintf(s, err.Error()) // nolint: errcheck - case 'q': - fmt.Fprintf(s, "%q", err.Error()) // nolint: errcheck - } -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod deleted file mode 100644 index c242fb09..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/nuclio/errors - -go 1.12 diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.sum b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/errors/go.sum deleted file mode 100644 index e69de29b..00000000 diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore deleted file mode 100644 index 485dee64..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml deleted file mode 100644 index 8d528872..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go -go_import_path: github.com/nuclio/nuclio-sdk-go -go: - - "1.10" - - "1.9" -script: make test diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md deleted file mode 100644 index bda4d560..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/HACK.md +++ /dev/null @@ -1,6 +0,0 @@ -# Hacking on nuclio-sdk - -## errors.go - -`errors.go` is automatically generated. If you bump Go version or suspect there -might be changes, run `go generate` to generate it. diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
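The removed nuclio-sdk-go package below and the reworked utils helper earlier in this patch share one pattern: errors that carry an HTTP status code, inspected through a StatusCode() method. As a rough, self-contained sketch of that pattern (the type and function names here are simplified stand-ins, not the vendored APIs, and errors.As is used in place of the direct type assertion the patch performs), a check equivalent to the new utils.IsNotExistsOrConflictError could look like this:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// ErrorWithStatusCode pairs an underlying error with an HTTP status code.
// This mirrors the shape of the vendored types this patch works with, but
// is a simplified illustration, not the vendored implementation.
type ErrorWithStatusCode struct {
	error
	statusCode int
}

// StatusCode returns the HTTP status code carried by the error.
func (e ErrorWithStatusCode) StatusCode() int { return e.statusCode }

// isNotExistsOrConflict reports whether err carries a 404 (Not Found) or
// 409 (Conflict) status, the same decision the new
// utils.IsNotExistsOrConflictError helper makes.
func isNotExistsOrConflict(err error) bool {
	var statusErr ErrorWithStatusCode
	if !errors.As(err, &statusErr) {
		return false // not a status-code-carrying error
	}
	code := statusErr.StatusCode()
	return code == http.StatusNotFound || code == http.StatusConflict
}

func main() {
	notFound := ErrorWithStatusCode{error: errors.New("no such table"), statusCode: http.StatusNotFound}
	fmt.Println(isNotExistsOrConflict(notFound))           // true
	fmt.Println(isNotExistsOrConflict(errors.New("boom"))) // false
}

The same shape appears in the generated errors.go removed further below, which pairs each net/http status constant with Err*, NewErr*, and WrapErr* helpers.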
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile deleted file mode 100644 index c5f3b06c..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2017 The Nuclio Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -test: lint - go test -v . - -lint: deps - go get -u github.com/pavius/impi/cmd/impi - go get -u gopkg.in/alecthomas/gometalinter.v2 - @$(GOPATH)/bin/gometalinter.v2 --install - @echo Verifying imports... - $(GOPATH)/bin/impi \ - --local github.com/nuclio/nuclio/ \ - --scheme stdLocalThirdParty \ - ./... - @echo Linting... - @$(GOPATH)/bin/gometalinter.v2 \ - --deadline=300s \ - --disable-all \ - --enable-gc \ - --enable=deadcode \ - --enable=goconst \ - --enable=gofmt \ - --enable=golint \ - --enable=gosimple \ - --enable=ineffassign \ - --enable=interfacer \ - --enable=misspell \ - --enable=staticcheck \ - --enable=staticcheck \ - --enable=unconvert \ - --enable=varcheck \ - --enable=vet \ - --enable=vetshadow \ - --exclude="_test.go" \ - --exclude="comment on" \ - --exclude="error should be the last" \ - --exclude="should have comment" \ - . - - @echo Done. - -deps: - go get -u github.com/nuclio/logger diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md deleted file mode 100644 index e5c53649..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Go SDK for nuclio - -To get started with nuclio, see https://github.com/nuclio/nuclio. \ No newline at end of file diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go deleted file mode 100644 index 83bb405c..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/context.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nuclio - -import "github.com/nuclio/logger" - -// Context holds objects whose lifetime is that of the function instance -type Context struct { - - // Logger allows submitting information to logger sinks configured in the platform - Logger logger.Logger - - // DataBinding holds a map of data-binding name to data binding. For example, if the user - // configured the function to bind to an Azure Event Hub, it will hold an instance of an Event Hub - // client. The user can type cast this to the client type - DataBinding map[string]DataBinding - - // WorkerID holds the unique identifier of the worker currently handling the event. It can be used - // to key into shared datasets to prevent locking - WorkerID int - - // UserData is nil by default. This holds information set by the user should they need access to long - // living data. The lifetime of this pointer is that of the _worker_ and workers can come and go. - // Treat this like a cache - always check if it's nil prior to access and re-populate if necessary - UserData interface{} - - // FunctionName holds the name of the function currently running - FunctionName string - - // FunctionVersion holds the version of the function currently running - FunctionVersion int - - // TriggerName holds the information about the invoking trigger in this context - TriggerName string -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go deleted file mode 100644 index 6241bdce..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/databinding.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -// DataBinding defines a generic interface to data sources configured in the function. For the time being -// there is no "abstract" data interface and the user will cast this to the specific data source client -type DataBinding interface{} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go deleted file mode 100644 index d10468fb..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package nuclio - -/* -SDK for working with Nuclio - -See README.md for more details. -*/ diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go deleted file mode 100644 index 05e7894c..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errgen.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//go:generate go run gen_errors.go -//go:generate go fmt errors.go - -package nuclio - -// This file exists only to generate errors.go -// To do that - run: go generate diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go deleted file mode 100644 index 5de57ca3..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/errors.go +++ /dev/null @@ -1,1223 +0,0 @@ -// Automatically generated by gen_errors.go - -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nuclio - -import ( - "errors" - "fmt" - "net/http" -) - -// WithStatusCode is an error with status code -type WithStatusCode interface { - StatusCode() int -} - -// ErrorWithStatusCode implements both error and WithStatusCode -type ErrorWithStatusCode struct { - error - statusCode int -} - -// GetError returns the underlying error -func (e *ErrorWithStatusCode) GetError() error { - return e.error -} - -// StatusCode returns the status code -func (e *ErrorWithStatusCode) StatusCode() int { - return e.statusCode -} - -// Error returns the error message -func (e ErrorWithStatusCode) Error() string { - if e.error != nil { - return e.error.Error() - } - - message, ok := defaultMessages[e.statusCode] - if !ok { - message = fmt.Sprintf("Unknown error: %d", e.statusCode) - } - - return message -} - -// ErrAccepted is a StatusAccepted Error -var ErrAccepted = ErrorWithStatusCode{statusCode: http.StatusAccepted} - -// NewErrAccepted returns a new ErrAccepted with custom error message -func NewErrAccepted(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusAccepted, - } -} - -// WrapErrAccepted returns a new ErrAccepted, wrapping an existing error -func WrapErrAccepted(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusAccepted, - } -} - -// ErrAlreadyReported is a StatusAlreadyReported Error -var ErrAlreadyReported = ErrorWithStatusCode{statusCode: http.StatusAlreadyReported} - -// NewErrAlreadyReported returns a new ErrAlreadyReported with custom error message -func NewErrAlreadyReported(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusAlreadyReported, - } -} - -// WrapErrAlreadyReported returns a new ErrAlreadyReported, wrapping an existing error -func WrapErrAlreadyReported(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusAlreadyReported, - } -} - -// ErrBadGateway is a StatusBadGateway Error -var ErrBadGateway = ErrorWithStatusCode{statusCode: http.StatusBadGateway} - -// NewErrBadGateway returns a new ErrBadGateway with custom error message -func NewErrBadGateway(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusBadGateway, - } -} - -// WrapErrBadGateway returns a new ErrBadGateway, wrapping an existing error -func WrapErrBadGateway(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusBadGateway, - } -} - -// ErrBadRequest is a StatusBadRequest Error -var ErrBadRequest = ErrorWithStatusCode{statusCode: http.StatusBadRequest} - -// NewErrBadRequest returns a new ErrBadRequest with custom error message -func NewErrBadRequest(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusBadRequest, - } -} - -// WrapErrBadRequest returns a new ErrBadRequest, wrapping an existing error -func WrapErrBadRequest(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusBadRequest, - } -} - -// ErrConflict is a StatusConflict Error -var ErrConflict = ErrorWithStatusCode{statusCode: http.StatusConflict} - -// NewErrConflict returns a new ErrConflict with custom error message -func NewErrConflict(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusConflict, - } -} - -// WrapErrConflict returns a new ErrConflict, wrapping an existing error -func WrapErrConflict(err 
error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusConflict, - } -} - -// ErrContinue is a StatusContinue Error -var ErrContinue = ErrorWithStatusCode{statusCode: http.StatusContinue} - -// NewErrContinue returns a new ErrContinue with custom error message -func NewErrContinue(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusContinue, - } -} - -// WrapErrContinue returns a new ErrContinue, wrapping an existing error -func WrapErrContinue(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusContinue, - } -} - -// ErrCreated is a StatusCreated Error -var ErrCreated = ErrorWithStatusCode{statusCode: http.StatusCreated} - -// NewErrCreated returns a new ErrCreated with custom error message -func NewErrCreated(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusCreated, - } -} - -// WrapErrCreated returns a new ErrCreated, wrapping an existing error -func WrapErrCreated(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusCreated, - } -} - -// ErrExpectationFailed is a StatusExpectationFailed Error -var ErrExpectationFailed = ErrorWithStatusCode{statusCode: http.StatusExpectationFailed} - -// NewErrExpectationFailed returns a new ErrExpectationFailed with custom error message -func NewErrExpectationFailed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusExpectationFailed, - } -} - -// WrapErrExpectationFailed returns a new ErrExpectationFailed, wrapping an existing error -func WrapErrExpectationFailed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusExpectationFailed, - } -} - -// ErrFailedDependency is a StatusFailedDependency Error -var ErrFailedDependency = ErrorWithStatusCode{statusCode: http.StatusFailedDependency} - -// NewErrFailedDependency returns a new ErrFailedDependency with custom error message -func NewErrFailedDependency(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusFailedDependency, - } -} - -// WrapErrFailedDependency returns a new ErrFailedDependency, wrapping an existing error -func WrapErrFailedDependency(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusFailedDependency, - } -} - -// ErrForbidden is a StatusForbidden Error -var ErrForbidden = ErrorWithStatusCode{statusCode: http.StatusForbidden} - -// NewErrForbidden returns a new ErrForbidden with custom error message -func NewErrForbidden(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusForbidden, - } -} - -// WrapErrForbidden returns a new ErrForbidden, wrapping an existing error -func WrapErrForbidden(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusForbidden, - } -} - -// ErrFound is a StatusFound Error -var ErrFound = ErrorWithStatusCode{statusCode: http.StatusFound} - -// NewErrFound returns a new ErrFound with custom error message -func NewErrFound(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusFound, - } -} - -// WrapErrFound returns a new ErrFound, wrapping an existing error -func WrapErrFound(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusFound, - } -} - -// ErrGatewayTimeout is a 
StatusGatewayTimeout Error -var ErrGatewayTimeout = ErrorWithStatusCode{statusCode: http.StatusGatewayTimeout} - -// NewErrGatewayTimeout returns a new ErrGatewayTimeout with custom error message -func NewErrGatewayTimeout(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusGatewayTimeout, - } -} - -// WrapErrGatewayTimeout returns a new ErrGatewayTimeout, wrapping an existing error -func WrapErrGatewayTimeout(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusGatewayTimeout, - } -} - -// ErrGone is a StatusGone Error -var ErrGone = ErrorWithStatusCode{statusCode: http.StatusGone} - -// NewErrGone returns a new ErrGone with custom error message -func NewErrGone(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusGone, - } -} - -// WrapErrGone returns a new ErrGone, wrapping an existing error -func WrapErrGone(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusGone, - } -} - -// ErrHTTPVersionNotSupported is a StatusHTTPVersionNotSupported Error -var ErrHTTPVersionNotSupported = ErrorWithStatusCode{statusCode: http.StatusHTTPVersionNotSupported} - -// NewErrHTTPVersionNotSupported returns a new ErrHTTPVersionNotSupported with custom error message -func NewErrHTTPVersionNotSupported(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusHTTPVersionNotSupported, - } -} - -// WrapErrHTTPVersionNotSupported returns a new ErrHTTPVersionNotSupported, wrapping an existing error -func WrapErrHTTPVersionNotSupported(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusHTTPVersionNotSupported, - } -} - -// ErrIMUsed is a StatusIMUsed Error -var ErrIMUsed = ErrorWithStatusCode{statusCode: http.StatusIMUsed} - -// NewErrIMUsed returns a new ErrIMUsed with custom error message -func NewErrIMUsed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusIMUsed, - } -} - -// WrapErrIMUsed returns a new ErrIMUsed, wrapping an existing error -func WrapErrIMUsed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusIMUsed, - } -} - -// ErrInsufficientStorage is a StatusInsufficientStorage Error -var ErrInsufficientStorage = ErrorWithStatusCode{statusCode: http.StatusInsufficientStorage} - -// NewErrInsufficientStorage returns a new ErrInsufficientStorage with custom error message -func NewErrInsufficientStorage(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusInsufficientStorage, - } -} - -// WrapErrInsufficientStorage returns a new ErrInsufficientStorage, wrapping an existing error -func WrapErrInsufficientStorage(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusInsufficientStorage, - } -} - -// ErrInternalServerError is a StatusInternalServerError Error -var ErrInternalServerError = ErrorWithStatusCode{statusCode: http.StatusInternalServerError} - -// NewErrInternalServerError returns a new ErrInternalServerError with custom error message -func NewErrInternalServerError(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusInternalServerError, - } -} - -// WrapErrInternalServerError returns a new ErrInternalServerError, wrapping an existing error -func WrapErrInternalServerError(err error) 
error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusInternalServerError, - } -} - -// ErrLengthRequired is a StatusLengthRequired Error -var ErrLengthRequired = ErrorWithStatusCode{statusCode: http.StatusLengthRequired} - -// NewErrLengthRequired returns a new ErrLengthRequired with custom error message -func NewErrLengthRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusLengthRequired, - } -} - -// WrapErrLengthRequired returns a new ErrLengthRequired, wrapping an existing error -func WrapErrLengthRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusLengthRequired, - } -} - -// ErrLocked is a StatusLocked Error -var ErrLocked = ErrorWithStatusCode{statusCode: http.StatusLocked} - -// NewErrLocked returns a new ErrLocked with custom error message -func NewErrLocked(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusLocked, - } -} - -// WrapErrLocked returns a new ErrLocked, wrapping an existing error -func WrapErrLocked(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusLocked, - } -} - -// ErrLoopDetected is a StatusLoopDetected Error -var ErrLoopDetected = ErrorWithStatusCode{statusCode: http.StatusLoopDetected} - -// NewErrLoopDetected returns a new ErrLoopDetected with custom error message -func NewErrLoopDetected(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusLoopDetected, - } -} - -// WrapErrLoopDetected returns a new ErrLoopDetected, wrapping an existing error -func WrapErrLoopDetected(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusLoopDetected, - } -} - -// ErrMethodNotAllowed is a StatusMethodNotAllowed Error -var ErrMethodNotAllowed = ErrorWithStatusCode{statusCode: http.StatusMethodNotAllowed} - -// NewErrMethodNotAllowed returns a new ErrMethodNotAllowed with custom error message -func NewErrMethodNotAllowed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMethodNotAllowed, - } -} - -// WrapErrMethodNotAllowed returns a new ErrMethodNotAllowed, wrapping an existing error -func WrapErrMethodNotAllowed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMethodNotAllowed, - } -} - -// ErrMovedPermanently is a StatusMovedPermanently Error -var ErrMovedPermanently = ErrorWithStatusCode{statusCode: http.StatusMovedPermanently} - -// NewErrMovedPermanently returns a new ErrMovedPermanently with custom error message -func NewErrMovedPermanently(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMovedPermanently, - } -} - -// WrapErrMovedPermanently returns a new ErrMovedPermanently, wrapping an existing error -func WrapErrMovedPermanently(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMovedPermanently, - } -} - -// ErrMultiStatus is a StatusMultiStatus Error -var ErrMultiStatus = ErrorWithStatusCode{statusCode: http.StatusMultiStatus} - -// NewErrMultiStatus returns a new ErrMultiStatus with custom error message -func NewErrMultiStatus(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMultiStatus, - } -} - -// WrapErrMultiStatus returns a new ErrMultiStatus, wrapping an existing error -func 
WrapErrMultiStatus(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMultiStatus, - } -} - -// ErrMultipleChoices is a StatusMultipleChoices Error -var ErrMultipleChoices = ErrorWithStatusCode{statusCode: http.StatusMultipleChoices} - -// NewErrMultipleChoices returns a new ErrMultipleChoices with custom error message -func NewErrMultipleChoices(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusMultipleChoices, - } -} - -// WrapErrMultipleChoices returns a new ErrMultipleChoices, wrapping an existing error -func WrapErrMultipleChoices(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusMultipleChoices, - } -} - -// ErrNetworkAuthenticationRequired is a StatusNetworkAuthenticationRequired Error -var ErrNetworkAuthenticationRequired = ErrorWithStatusCode{statusCode: http.StatusNetworkAuthenticationRequired} - -// NewErrNetworkAuthenticationRequired returns a new ErrNetworkAuthenticationRequired with custom error message -func NewErrNetworkAuthenticationRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNetworkAuthenticationRequired, - } -} - -// WrapErrNetworkAuthenticationRequired returns a new ErrNetworkAuthenticationRequired, wrapping an existing error -func WrapErrNetworkAuthenticationRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNetworkAuthenticationRequired, - } -} - -// ErrNoContent is a StatusNoContent Error -var ErrNoContent = ErrorWithStatusCode{statusCode: http.StatusNoContent} - -// NewErrNoContent returns a new ErrNoContent with custom error message -func NewErrNoContent(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNoContent, - } -} - -// WrapErrNoContent returns a new ErrNoContent, wrapping an existing error -func WrapErrNoContent(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNoContent, - } -} - -// ErrNonAuthoritativeInfo is a StatusNonAuthoritativeInfo Error -var ErrNonAuthoritativeInfo = ErrorWithStatusCode{statusCode: http.StatusNonAuthoritativeInfo} - -// NewErrNonAuthoritativeInfo returns a new ErrNonAuthoritativeInfo with custom error message -func NewErrNonAuthoritativeInfo(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNonAuthoritativeInfo, - } -} - -// WrapErrNonAuthoritativeInfo returns a new ErrNonAuthoritativeInfo, wrapping an existing error -func WrapErrNonAuthoritativeInfo(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNonAuthoritativeInfo, - } -} - -// ErrNotAcceptable is a StatusNotAcceptable Error -var ErrNotAcceptable = ErrorWithStatusCode{statusCode: http.StatusNotAcceptable} - -// NewErrNotAcceptable returns a new ErrNotAcceptable with custom error message -func NewErrNotAcceptable(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotAcceptable, - } -} - -// WrapErrNotAcceptable returns a new ErrNotAcceptable, wrapping an existing error -func WrapErrNotAcceptable(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotAcceptable, - } -} - -// ErrNotExtended is a StatusNotExtended Error -var ErrNotExtended = ErrorWithStatusCode{statusCode: http.StatusNotExtended} - -// NewErrNotExtended returns a new 
ErrNotExtended with custom error message -func NewErrNotExtended(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotExtended, - } -} - -// WrapErrNotExtended returns a new ErrNotExtended, wrapping an existing error -func WrapErrNotExtended(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotExtended, - } -} - -// ErrNotFound is a StatusNotFound Error -var ErrNotFound = ErrorWithStatusCode{statusCode: http.StatusNotFound} - -// NewErrNotFound returns a new ErrNotFound with custom error message -func NewErrNotFound(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotFound, - } -} - -// WrapErrNotFound returns a new ErrNotFound, wrapping an existing error -func WrapErrNotFound(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotFound, - } -} - -// ErrNotImplemented is a StatusNotImplemented Error -var ErrNotImplemented = ErrorWithStatusCode{statusCode: http.StatusNotImplemented} - -// NewErrNotImplemented returns a new ErrNotImplemented with custom error message -func NewErrNotImplemented(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotImplemented, - } -} - -// WrapErrNotImplemented returns a new ErrNotImplemented, wrapping an existing error -func WrapErrNotImplemented(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotImplemented, - } -} - -// ErrNotModified is a StatusNotModified Error -var ErrNotModified = ErrorWithStatusCode{statusCode: http.StatusNotModified} - -// NewErrNotModified returns a new ErrNotModified with custom error message -func NewErrNotModified(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusNotModified, - } -} - -// WrapErrNotModified returns a new ErrNotModified, wrapping an existing error -func WrapErrNotModified(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusNotModified, - } -} - -// ErrPartialContent is a StatusPartialContent Error -var ErrPartialContent = ErrorWithStatusCode{statusCode: http.StatusPartialContent} - -// NewErrPartialContent returns a new ErrPartialContent with custom error message -func NewErrPartialContent(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPartialContent, - } -} - -// WrapErrPartialContent returns a new ErrPartialContent, wrapping an existing error -func WrapErrPartialContent(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPartialContent, - } -} - -// ErrPaymentRequired is a StatusPaymentRequired Error -var ErrPaymentRequired = ErrorWithStatusCode{statusCode: http.StatusPaymentRequired} - -// NewErrPaymentRequired returns a new ErrPaymentRequired with custom error message -func NewErrPaymentRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPaymentRequired, - } -} - -// WrapErrPaymentRequired returns a new ErrPaymentRequired, wrapping an existing error -func WrapErrPaymentRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPaymentRequired, - } -} - -// ErrPermanentRedirect is a StatusPermanentRedirect Error -var ErrPermanentRedirect = ErrorWithStatusCode{statusCode: http.StatusPermanentRedirect} - -// 
NewErrPermanentRedirect returns a new ErrPermanentRedirect with custom error message -func NewErrPermanentRedirect(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPermanentRedirect, - } -} - -// WrapErrPermanentRedirect returns a new ErrPermanentRedirect, wrapping an existing error -func WrapErrPermanentRedirect(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPermanentRedirect, - } -} - -// ErrPreconditionFailed is a StatusPreconditionFailed Error -var ErrPreconditionFailed = ErrorWithStatusCode{statusCode: http.StatusPreconditionFailed} - -// NewErrPreconditionFailed returns a new ErrPreconditionFailed with custom error message -func NewErrPreconditionFailed(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPreconditionFailed, - } -} - -// WrapErrPreconditionFailed returns a new ErrPreconditionFailed, wrapping an existing error -func WrapErrPreconditionFailed(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPreconditionFailed, - } -} - -// ErrPreconditionRequired is a StatusPreconditionRequired Error -var ErrPreconditionRequired = ErrorWithStatusCode{statusCode: http.StatusPreconditionRequired} - -// NewErrPreconditionRequired returns a new ErrPreconditionRequired with custom error message -func NewErrPreconditionRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusPreconditionRequired, - } -} - -// WrapErrPreconditionRequired returns a new ErrPreconditionRequired, wrapping an existing error -func WrapErrPreconditionRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusPreconditionRequired, - } -} - -// ErrProcessing is a StatusProcessing Error -var ErrProcessing = ErrorWithStatusCode{statusCode: http.StatusProcessing} - -// NewErrProcessing returns a new ErrProcessing with custom error message -func NewErrProcessing(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusProcessing, - } -} - -// WrapErrProcessing returns a new ErrProcessing, wrapping an existing error -func WrapErrProcessing(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusProcessing, - } -} - -// ErrProxyAuthRequired is a StatusProxyAuthRequired Error -var ErrProxyAuthRequired = ErrorWithStatusCode{statusCode: http.StatusProxyAuthRequired} - -// NewErrProxyAuthRequired returns a new ErrProxyAuthRequired with custom error message -func NewErrProxyAuthRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusProxyAuthRequired, - } -} - -// WrapErrProxyAuthRequired returns a new ErrProxyAuthRequired, wrapping an existing error -func WrapErrProxyAuthRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusProxyAuthRequired, - } -} - -// ErrRequestEntityTooLarge is a StatusRequestEntityTooLarge Error -var ErrRequestEntityTooLarge = ErrorWithStatusCode{statusCode: http.StatusRequestEntityTooLarge} - -// NewErrRequestEntityTooLarge returns a new ErrRequestEntityTooLarge with custom error message -func NewErrRequestEntityTooLarge(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestEntityTooLarge, - } -} - -// WrapErrRequestEntityTooLarge returns a new 
ErrRequestEntityTooLarge, wrapping an existing error -func WrapErrRequestEntityTooLarge(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestEntityTooLarge, - } -} - -// ErrRequestHeaderFieldsTooLarge is a StatusRequestHeaderFieldsTooLarge Error -var ErrRequestHeaderFieldsTooLarge = ErrorWithStatusCode{statusCode: http.StatusRequestHeaderFieldsTooLarge} - -// NewErrRequestHeaderFieldsTooLarge returns a new ErrRequestHeaderFieldsTooLarge with custom error message -func NewErrRequestHeaderFieldsTooLarge(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestHeaderFieldsTooLarge, - } -} - -// WrapErrRequestHeaderFieldsTooLarge returns a new ErrRequestHeaderFieldsTooLarge, wrapping an existing error -func WrapErrRequestHeaderFieldsTooLarge(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestHeaderFieldsTooLarge, - } -} - -// ErrRequestTimeout is a StatusRequestTimeout Error -var ErrRequestTimeout = ErrorWithStatusCode{statusCode: http.StatusRequestTimeout} - -// NewErrRequestTimeout returns a new ErrRequestTimeout with custom error message -func NewErrRequestTimeout(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestTimeout, - } -} - -// WrapErrRequestTimeout returns a new ErrRequestTimeout, wrapping an existing error -func WrapErrRequestTimeout(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestTimeout, - } -} - -// ErrRequestURITooLong is a StatusRequestURITooLong Error -var ErrRequestURITooLong = ErrorWithStatusCode{statusCode: http.StatusRequestURITooLong} - -// NewErrRequestURITooLong returns a new ErrRequestURITooLong with custom error message -func NewErrRequestURITooLong(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestURITooLong, - } -} - -// WrapErrRequestURITooLong returns a new ErrRequestURITooLong, wrapping an existing error -func WrapErrRequestURITooLong(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestURITooLong, - } -} - -// ErrRequestedRangeNotSatisfiable is a StatusRequestedRangeNotSatisfiable Error -var ErrRequestedRangeNotSatisfiable = ErrorWithStatusCode{statusCode: http.StatusRequestedRangeNotSatisfiable} - -// NewErrRequestedRangeNotSatisfiable returns a new ErrRequestedRangeNotSatisfiable with custom error message -func NewErrRequestedRangeNotSatisfiable(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusRequestedRangeNotSatisfiable, - } -} - -// WrapErrRequestedRangeNotSatisfiable returns a new ErrRequestedRangeNotSatisfiable, wrapping an existing error -func WrapErrRequestedRangeNotSatisfiable(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusRequestedRangeNotSatisfiable, - } -} - -// ErrResetContent is a StatusResetContent Error -var ErrResetContent = ErrorWithStatusCode{statusCode: http.StatusResetContent} - -// NewErrResetContent returns a new ErrResetContent with custom error message -func NewErrResetContent(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusResetContent, - } -} - -// WrapErrResetContent returns a new ErrResetContent, wrapping an existing error -func WrapErrResetContent(err error) error { - return &ErrorWithStatusCode{ - 
error: err, - statusCode: http.StatusResetContent, - } -} - -// ErrSeeOther is a StatusSeeOther Error -var ErrSeeOther = ErrorWithStatusCode{statusCode: http.StatusSeeOther} - -// NewErrSeeOther returns a new ErrSeeOther with custom error message -func NewErrSeeOther(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusSeeOther, - } -} - -// WrapErrSeeOther returns a new ErrSeeOther, wrapping an existing error -func WrapErrSeeOther(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusSeeOther, - } -} - -// ErrServiceUnavailable is a StatusServiceUnavailable Error -var ErrServiceUnavailable = ErrorWithStatusCode{statusCode: http.StatusServiceUnavailable} - -// NewErrServiceUnavailable returns a new ErrServiceUnavailable with custom error message -func NewErrServiceUnavailable(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusServiceUnavailable, - } -} - -// WrapErrServiceUnavailable returns a new ErrServiceUnavailable, wrapping an existing error -func WrapErrServiceUnavailable(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusServiceUnavailable, - } -} - -// ErrSwitchingProtocols is a StatusSwitchingProtocols Error -var ErrSwitchingProtocols = ErrorWithStatusCode{statusCode: http.StatusSwitchingProtocols} - -// NewErrSwitchingProtocols returns a new ErrSwitchingProtocols with custom error message -func NewErrSwitchingProtocols(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusSwitchingProtocols, - } -} - -// WrapErrSwitchingProtocols returns a new ErrSwitchingProtocols, wrapping an existing error -func WrapErrSwitchingProtocols(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusSwitchingProtocols, - } -} - -// ErrTeapot is a StatusTeapot Error -var ErrTeapot = ErrorWithStatusCode{statusCode: http.StatusTeapot} - -// NewErrTeapot returns a new ErrTeapot with custom error message -func NewErrTeapot(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusTeapot, - } -} - -// WrapErrTeapot returns a new ErrTeapot, wrapping an existing error -func WrapErrTeapot(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusTeapot, - } -} - -// ErrTemporaryRedirect is a StatusTemporaryRedirect Error -var ErrTemporaryRedirect = ErrorWithStatusCode{statusCode: http.StatusTemporaryRedirect} - -// NewErrTemporaryRedirect returns a new ErrTemporaryRedirect with custom error message -func NewErrTemporaryRedirect(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusTemporaryRedirect, - } -} - -// WrapErrTemporaryRedirect returns a new ErrTemporaryRedirect, wrapping an existing error -func WrapErrTemporaryRedirect(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusTemporaryRedirect, - } -} - -// ErrTooManyRequests is a StatusTooManyRequests Error -var ErrTooManyRequests = ErrorWithStatusCode{statusCode: http.StatusTooManyRequests} - -// NewErrTooManyRequests returns a new ErrTooManyRequests with custom error message -func NewErrTooManyRequests(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusTooManyRequests, - } -} - -// WrapErrTooManyRequests returns a new ErrTooManyRequests, wrapping an 
existing error -func WrapErrTooManyRequests(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusTooManyRequests, - } -} - -// ErrUnauthorized is a StatusUnauthorized Error -var ErrUnauthorized = ErrorWithStatusCode{statusCode: http.StatusUnauthorized} - -// NewErrUnauthorized returns a new ErrUnauthorized with custom error message -func NewErrUnauthorized(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnauthorized, - } -} - -// WrapErrUnauthorized returns a new ErrUnauthorized, wrapping an existing error -func WrapErrUnauthorized(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnauthorized, - } -} - -// ErrUnavailableForLegalReasons is a StatusUnavailableForLegalReasons Error -var ErrUnavailableForLegalReasons = ErrorWithStatusCode{statusCode: http.StatusUnavailableForLegalReasons} - -// NewErrUnavailableForLegalReasons returns a new ErrUnavailableForLegalReasons with custom error message -func NewErrUnavailableForLegalReasons(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnavailableForLegalReasons, - } -} - -// WrapErrUnavailableForLegalReasons returns a new ErrUnavailableForLegalReasons, wrapping an existing error -func WrapErrUnavailableForLegalReasons(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnavailableForLegalReasons, - } -} - -// ErrUnprocessableEntity is a StatusUnprocessableEntity Error -var ErrUnprocessableEntity = ErrorWithStatusCode{statusCode: http.StatusUnprocessableEntity} - -// NewErrUnprocessableEntity returns a new ErrUnprocessableEntity with custom error message -func NewErrUnprocessableEntity(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnprocessableEntity, - } -} - -// WrapErrUnprocessableEntity returns a new ErrUnprocessableEntity, wrapping an existing error -func WrapErrUnprocessableEntity(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnprocessableEntity, - } -} - -// ErrUnsupportedMediaType is a StatusUnsupportedMediaType Error -var ErrUnsupportedMediaType = ErrorWithStatusCode{statusCode: http.StatusUnsupportedMediaType} - -// NewErrUnsupportedMediaType returns a new ErrUnsupportedMediaType with custom error message -func NewErrUnsupportedMediaType(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUnsupportedMediaType, - } -} - -// WrapErrUnsupportedMediaType returns a new ErrUnsupportedMediaType, wrapping an existing error -func WrapErrUnsupportedMediaType(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUnsupportedMediaType, - } -} - -// ErrUpgradeRequired is a StatusUpgradeRequired Error -var ErrUpgradeRequired = ErrorWithStatusCode{statusCode: http.StatusUpgradeRequired} - -// NewErrUpgradeRequired returns a new ErrUpgradeRequired with custom error message -func NewErrUpgradeRequired(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUpgradeRequired, - } -} - -// WrapErrUpgradeRequired returns a new ErrUpgradeRequired, wrapping an existing error -func WrapErrUpgradeRequired(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUpgradeRequired, - } -} - -// ErrUseProxy is a StatusUseProxy Error -var ErrUseProxy = 
ErrorWithStatusCode{statusCode: http.StatusUseProxy} - -// NewErrUseProxy returns a new ErrUseProxy with custom error message -func NewErrUseProxy(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusUseProxy, - } -} - -// WrapErrUseProxy returns a new ErrUseProxy, wrapping an existing error -func WrapErrUseProxy(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusUseProxy, - } -} - -// ErrVariantAlsoNegotiates is a StatusVariantAlsoNegotiates Error -var ErrVariantAlsoNegotiates = ErrorWithStatusCode{statusCode: http.StatusVariantAlsoNegotiates} - -// NewErrVariantAlsoNegotiates returns a new ErrVariantAlsoNegotiates with custom error message -func NewErrVariantAlsoNegotiates(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.StatusVariantAlsoNegotiates, - } -} - -// WrapErrVariantAlsoNegotiates returns a new ErrVariantAlsoNegotiates, wrapping an existing error -func WrapErrVariantAlsoNegotiates(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.StatusVariantAlsoNegotiates, - } -} - -var defaultMessages = map[int]string{ - http.StatusAccepted: "Accepted", - http.StatusAlreadyReported: "Already Reported", - http.StatusBadGateway: "Bad Gateway", - http.StatusBadRequest: "Bad Request", - http.StatusConflict: "Conflict", - http.StatusContinue: "Continue", - http.StatusCreated: "Created", - http.StatusExpectationFailed: "Expectation Failed", - http.StatusFailedDependency: "Failed Dependency", - http.StatusForbidden: "Forbidden", - http.StatusFound: "Found", - http.StatusGatewayTimeout: "Gateway Timeout", - http.StatusGone: "Gone", - http.StatusHTTPVersionNotSupported: "HTTPVersion Not Supported", - http.StatusIMUsed: "IMUsed", - http.StatusInsufficientStorage: "Insufficient Storage", - http.StatusInternalServerError: "Internal Server Error", - http.StatusLengthRequired: "Length Required", - http.StatusLocked: "Locked", - http.StatusLoopDetected: "Loop Detected", - http.StatusMethodNotAllowed: "Method Not Allowed", - http.StatusMovedPermanently: "Moved Permanently", - http.StatusMultiStatus: "Multi Status", - http.StatusMultipleChoices: "Multiple Choices", - http.StatusNetworkAuthenticationRequired: "Network Authentication Required", - http.StatusNoContent: "No Content", - http.StatusNonAuthoritativeInfo: "Non Authoritative Info", - http.StatusNotAcceptable: "Not Acceptable", - http.StatusNotExtended: "Not Extended", - http.StatusNotFound: "Not Found", - http.StatusNotImplemented: "Not Implemented", - http.StatusNotModified: "Not Modified", - http.StatusPartialContent: "Partial Content", - http.StatusPaymentRequired: "Payment Required", - http.StatusPermanentRedirect: "Permanent Redirect", - http.StatusPreconditionFailed: "Precondition Failed", - http.StatusPreconditionRequired: "Precondition Required", - http.StatusProcessing: "Processing", - http.StatusProxyAuthRequired: "Proxy Auth Required", - http.StatusRequestEntityTooLarge: "Request Entity Too Large", - http.StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", - http.StatusRequestTimeout: "Request Timeout", - http.StatusRequestURITooLong: "Request URIToo Long", - http.StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", - http.StatusResetContent: "Reset Content", - http.StatusSeeOther: "See Other", - http.StatusServiceUnavailable: "Service Unavailable", - http.StatusSwitchingProtocols: "Switching Protocols", - http.StatusTeapot: 
"Teapot", - http.StatusTemporaryRedirect: "Temporary Redirect", - http.StatusTooManyRequests: "Too Many Requests", - http.StatusUnauthorized: "Unauthorized", - http.StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons", - http.StatusUnprocessableEntity: "Unprocessable Entity", - http.StatusUnsupportedMediaType: "Unsupported Media Type", - http.StatusUpgradeRequired: "Upgrade Required", - http.StatusUseProxy: "Use Proxy", - http.StatusVariantAlsoNegotiates: "Variant Also Negotiates", -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go deleted file mode 100644 index 8dd51720..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -import ( - "errors" - "strconv" - "time" -) - -// ErrUnsupported is returned when an unsupported interface on the event is called -var ErrUnsupported = errors.New("Event does not support this interface") - -// ErrTypeConversion is returned when a type conversion for headers / fields fails -var ErrTypeConversion = errors.New("Cannot convert to this type") - -// TriggerInfoProvider provides information about the trigger in which this event originated -type TriggerInfoProvider interface { - - // get the class of source (sync, async, etc) - GetClass() string - - // get specific kind of source (http, rabbit mq, etc) - GetKind() string -} - -// Event allows access to the concrete event -type Event interface { - - // GetID returns the ID of the event - GetID() ID - - // SetID sets the ID of the event - SetID(ID) - - // SetTriggerInfoProvider sets the information about the trigger who triggered this event - SetTriggerInfoProvider(TriggerInfoProvider) - - // GetTriggerInfo retruns a trigger info provider - GetTriggerInfo() TriggerInfoProvider - - // GetContentType returns the content type of the body - GetContentType() string - - // GetBody returns the body of the event - GetBody() []byte - - // GetBodyObject returns the body of the event as an object - GetBodyObject() interface{} - - // GetHeader returns the header by name as an interface{} - GetHeader(string) interface{} - - // GetHeaderByteSlice returns the header by name as a byte slice - GetHeaderByteSlice(string) []byte - - // GetHeaderString returns the header by name as a string - GetHeaderString(string) string - - // GetHeaderInt returns the field by name as an integer - GetHeaderInt(string) (int, error) - - // GetHeaders loads all headers into a map of string / interface{} - GetHeaders() map[string]interface{} - - // GetField returns the field by name as an interface{} - GetField(string) interface{} - - // GetFieldByteSlice returns the field by name as a byte slice - GetFieldByteSlice(string) []byte - - // GetFieldString returns the field by name as a string - 
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go
deleted file mode 100644
index 8dd51720..00000000
--- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/event.go
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
-Copyright 2017 The Nuclio Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nuclio
-
-import (
-    "errors"
-    "strconv"
-    "time"
-)
-
-// ErrUnsupported is returned when an unsupported interface on the event is called
-var ErrUnsupported = errors.New("Event does not support this interface")
-
-// ErrTypeConversion is returned when a type conversion for headers / fields fails
-var ErrTypeConversion = errors.New("Cannot convert to this type")
-
-// TriggerInfoProvider provides information about the trigger in which this event originated
-type TriggerInfoProvider interface {
-
-    // get the class of source (sync, async, etc)
-    GetClass() string
-
-    // get specific kind of source (http, rabbit mq, etc)
-    GetKind() string
-}
-
-// Event allows access to the concrete event
-type Event interface {
-
-    // GetID returns the ID of the event
-    GetID() ID
-
-    // SetID sets the ID of the event
-    SetID(ID)
-
-    // SetTriggerInfoProvider sets the information about the trigger that triggered this event
-    SetTriggerInfoProvider(TriggerInfoProvider)
-
-    // GetTriggerInfo returns a trigger info provider
-    GetTriggerInfo() TriggerInfoProvider
-
-    // GetContentType returns the content type of the body
-    GetContentType() string
-
-    // GetBody returns the body of the event
-    GetBody() []byte
-
-    // GetBodyObject returns the body of the event as an object
-    GetBodyObject() interface{}
-
-    // GetHeader returns the header by name as an interface{}
-    GetHeader(string) interface{}
-
-    // GetHeaderByteSlice returns the header by name as a byte slice
-    GetHeaderByteSlice(string) []byte
-
-    // GetHeaderString returns the header by name as a string
-    GetHeaderString(string) string
-
-    // GetHeaderInt returns the header by name as an integer
-    GetHeaderInt(string) (int, error)
-
-    // GetHeaders loads all headers into a map of string / interface{}
-    GetHeaders() map[string]interface{}
-
-    // GetField returns the field by name as an interface{}
-    GetField(string) interface{}
-
-    // GetFieldByteSlice returns the field by name as a byte slice
-    GetFieldByteSlice(string) []byte
-
-    // GetFieldString returns the field by name as a string
-    GetFieldString(string) string
-
-    // GetFieldInt returns the field by name as an integer
-    GetFieldInt(string) (int, error)
-
-    // GetFields loads all fields into a map of string / interface{}
-    GetFields() map[string]interface{}
-
-    // GetTimestamp returns when the event originated
-    GetTimestamp() time.Time
-
-    // GetPath returns the path of the event
-    GetPath() string
-
-    // GetURL returns the URL of the event
-    GetURL() string
-
-    // GetMethod returns the method of the event, if applicable
-    GetMethod() string
-
-    // GetShardID returns the ID of the shard from which this event arrived, if applicable
-    GetShardID() int
-
-    // GetTotalNumShards returns the total number of shards, if applicable
-    GetTotalNumShards() int
-
-    // GetType returns the type of event
-    GetType() string
-
-    // GetTypeVersion returns the version of the type
-    GetTypeVersion() string
-
-    // GetVersion returns the version of the event
-    GetVersion() string
-}
-
-// AbstractEvent provides a base implementation of an event
-type AbstractEvent struct {
-    triggerInfoProvider TriggerInfoProvider
-    id                  ID
-    emptyByteArray      []byte
-    emptyHeaders        map[string]interface{}
-    emptyTime           time.Time
-}
-
-// SetTriggerInfoProvider sets the information about the trigger that triggered this event
-func (ae *AbstractEvent) SetTriggerInfoProvider(triggerInfoProvider TriggerInfoProvider) {
-    ae.triggerInfoProvider = triggerInfoProvider
-}
-
-// GetTriggerInfo returns a trigger info provider
-func (ae *AbstractEvent) GetTriggerInfo() TriggerInfoProvider {
-    return ae.triggerInfoProvider
-}
-
-// GetID returns the ID of the event
-func (ae *AbstractEvent) GetID() ID {
-    return ae.id
-}
-
-// SetID sets the ID of the event
-func (ae *AbstractEvent) SetID(id ID) {
-    ae.id = id
-}
-
-// GetContentType returns the content type of the body
-func (ae *AbstractEvent) GetContentType() string {
-    return ""
-}
-
-// GetBody returns the body of the event
-func (ae *AbstractEvent) GetBody() []byte {
-    return ae.emptyByteArray
-}
-
-// GetBodyObject returns the body of the event as an object
-func (ae *AbstractEvent) GetBodyObject() interface{} {
-    return ae.GetBody()
-}
-
-// GetHeader returns the header by name as an interface{}
-func (ae *AbstractEvent) GetHeader(key string) interface{} {
-    return nil
-}
-
-// GetHeaderByteSlice returns the header by name as a byte slice
-func (ae *AbstractEvent) GetHeaderByteSlice(key string) []byte {
-    return ae.emptyByteArray
-}
-
-// GetHeaderString returns the header by name as a string
-func (ae *AbstractEvent) GetHeaderString(key string) string {
-    return string(ae.GetHeaderByteSlice(key))
-}
-
-// GetHeaderInt returns the header by name as an integer
-func (ae *AbstractEvent) GetHeaderInt(key string) (int, error) {
-
-    // try to get header as an interface
-    headerAsInterface := ae.GetHeader(key)
-
-    // if the header value is not an integer
-    switch typedHeader := headerAsInterface.(type) {
-    case int:
-        return typedHeader, nil
-    case string:
-        return strconv.Atoi(typedHeader)
-    case []byte:
-        return strconv.Atoi(string(typedHeader))
-
-    default:
-        return 0, ErrTypeConversion
-    }
-}
-
-// GetHeaders loads all headers into a map of string / interface{}
-func (ae *AbstractEvent) GetHeaders() map[string]interface{} {
-    return ae.emptyHeaders
-}
-
-// GetTimestamp returns when the event originated
-func (ae *AbstractEvent) GetTimestamp() time.Time {
-    return ae.emptyTime
-}
-
-// GetPath returns the path of the event
-func (ae *AbstractEvent) GetPath() string {
-    return ""
-}
-
-// GetURL returns the URL of the event
-func (ae *AbstractEvent) GetURL() string {
-    return ""
-}
-
-// GetMethod returns the method of the event, if applicable
-func (ae *AbstractEvent) GetMethod() string {
-    return ""
-}
-
-// GetField returns the field by name as an interface{}
-func (ae *AbstractEvent) GetField(key string) interface{} {
-    return nil
-}
-
-// GetFieldByteSlice returns the field by name as a byte slice
-func (ae *AbstractEvent) GetFieldByteSlice(key string) []byte {
-    return nil
-}
-
-// GetFieldString returns the field by name as a string
-func (ae *AbstractEvent) GetFieldString(key string) string {
-    return ""
-}
-
-// GetFieldInt returns the field by name as an integer
-func (ae *AbstractEvent) GetFieldInt(key string) (int, error) {
-    return 0, ErrUnsupported
-}
-
-// GetFields loads all fields into a map of string / interface{}
-func (ae *AbstractEvent) GetFields() map[string]interface{} {
-    return nil
-}
-
-// GetShardID returns the ID of the shard from which this event arrived, if applicable
-func (ae *AbstractEvent) GetShardID() int {
-    return -1
-}
-
-// GetTotalNumShards returns the total number of shards, if applicable
-func (ae *AbstractEvent) GetTotalNumShards() int {
-    return 0
-}
-
-// GetType returns the type of event
-func (ae *AbstractEvent) GetType() string {
-    return ""
-}
-
-// GetTypeVersion returns the version of the type
-func (ae *AbstractEvent) GetTypeVersion() string {
-    return ""
-}
-
-// GetVersion returns the version of the event
-func (ae *AbstractEvent) GetVersion() string {
-    return ""
-}
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go
deleted file mode 100644
index 017f111b..00000000
--- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/gen_errors.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// +build ignore
-
-/*
-Copyright 2017 The Nuclio Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Generate errors.go from constants in net/http
-package main
-
-import (
-    "flag"
-    "go/importer"
-    "log"
-    "os"
-    "regexp"
-    "sort"
-    "strings"
-    "text/template"
-)
-
-const (
-    statusPrefix = "Status"
-
-    codeTemplateText = `// Automatically generated by gen_errors.go
-
-/*
-Copyright 2017 The Nuclio Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package nuclio - -import ( - "errors" - "fmt" - "net/http" -) - -// WithStatusCode is an error with status code -type WithStatusCode interface { - StatusCode() int -} - -// ErrorWithStatusCode implements both error and WithStatusCode -type ErrorWithStatusCode struct { - error - statusCode int -} - -// GetError returns the underlying error -func (e *ErrorWithStatusCode) GetError() error { - return e.error -} - -// StatusCode returns the status code -func (e *ErrorWithStatusCode) StatusCode() int { - return e.statusCode -} - -// Error returns the error message -func (e ErrorWithStatusCode) Error() string { - if e.error != nil { - return e.error.Error() - } - - message, ok := defaultMessages[e.statusCode] - if !ok { - message = fmt.Sprintf("Unknown error: %d", e.statusCode) - } - - return message -} - -{{range .}} -// {{. | StatusToError}} is a {{.}} Error -var {{. | StatusToError}} = ErrorWithStatusCode{statusCode: http.{{.}}} - -// New{{. | StatusToError}} returns a new {{. | StatusToError}} with custom error message -func New{{. | StatusToError}}(message string) error { - return &ErrorWithStatusCode{ - error: errors.New(message), - statusCode: http.{{.}}, - } -} - -// Wrap{{. | StatusToError}} returns a new {{. | StatusToError}}, wrapping an existing error -func Wrap{{. | StatusToError}}(err error) error { - return &ErrorWithStatusCode{ - error: err, - statusCode: http.{{.}}, - } -} -{{end}} - -var defaultMessages = map[int]string{ -{{- range .}} - http.{{.}}: "{{. | HumanStatus}}", -{{- end}} -} -` -) - -var ( - // Add space between camel case - humanRe = regexp.MustCompile("([a-z])([A-Z])") -) - -// StatusToError convert http status name to error name -// (e.g. "StatusAccepted" -> "ErrAccepted") -func StatusToError(status string) string { - return "Err" + status[len(statusPrefix):] -} - -// HumanStatus returns human formed status -// (e.g. "StatusTooManyRequests" -> "Too Many Requests") -func HumanStatus(status string) string { - return humanRe.ReplaceAllString(status[len(statusPrefix):], "$1 $2") -} - -func main() { - flag.Parse() // Support -h, --help - - pkg, err := importer.Default().Import("net/http") - if err != nil { - log.Fatal(err) - } - - var names []string - for _, name := range pkg.Scope().Names() { - if !strings.HasPrefix(name, statusPrefix) || name == "StatusOK" { - continue - } - - obj := pkg.Scope().Lookup(name) - if obj.Type().String() != "untyped int" { - continue - } - - names = append(names, name) - } - sort.Strings(names) - - funcMap := template.FuncMap{ - "StatusToError": StatusToError, - "HumanStatus": HumanStatus, - } - codeTemplate, err := template.New("").Funcs(funcMap).Parse(codeTemplateText) - if err != nil { - log.Fatal(err) - } - - out, err := os.Create("errors.go") - if err != nil { - log.Fatal(err) - } - - if err := codeTemplate.Execute(out, names); err != nil { - log.Fatal(err) - } -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go deleted file mode 100644 index 041ce1b8..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/response.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -// Response can be returned from functions, allowing the user to specify various fields -type Response struct { - StatusCode int - ContentType string - Headers map[string]interface{} - Body []byte -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go deleted file mode 100644 index fdc1a34c..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-sdk-go/types.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nuclio - -// ID is event ID -type ID string diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
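Everything under nuclio-test-go below leans on the `AbstractEvent` base type that the deleted event.go provides: a concrete event embeds it and overrides only the accessors it needs, inheriting empty defaults for the rest. A rough sketch of that embedding pattern (the `bodyOnlyEvent` type is invented for illustration, not part of this patch):

```golang
package main

import (
	"fmt"

	"github.com/nuclio/nuclio-sdk-go"
)

// bodyOnlyEvent is a hypothetical event type: embedding AbstractEvent
// supplies defaults for the whole Event interface, so only GetBody is overridden.
type bodyOnlyEvent struct {
	nuclio.AbstractEvent
	body []byte
}

// compile-time check that the embedded defaults satisfy the full interface
var _ nuclio.Event = (*bodyOnlyEvent)(nil)

func (e *bodyOnlyEvent) GetBody() []byte {
	return e.body
}

func main() {
	event := &bodyOnlyEvent{body: []byte("1234")}

	fmt.Println(string(event.GetBody())) // overridden accessor: "1234"
	fmt.Println(event.GetPath())         // inherited default: empty string
}
```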
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md
deleted file mode 100644
index 7fe41427..00000000
--- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# nuclio function wrapper
-
-Test nuclio functions locally or as part of Go testing
-
-# Usage, Golang unit testing framework:
-
-```golang
-package main
-
-import (
-    "testing"
-    "github.com/nuclio/nuclio-test-go"
-)
-
-func TestName(t *testing.T) {
-    // data binding for V3IO data containers, optional (use nil instead of &data)
-    data := nutest.DataBind{Name:"db0", Url:"", Container:"x"}
-
-    // Create TestContext and specify the function name, verbose, data
-    tc, err := nutest.NewTestContext(MyHandler, true, &data)
-    if err != nil {
-        t.Fail()
-    }
-
-    // Optional, initialize context must have a function in the form:
-    // InitContext(context *nuclio.Context) error
-    err = tc.InitContext(InitContext)
-    if err != nil {
-        t.Fail()
-    }
-
-    // Create a new test event
-    testEvent := nutest.TestEvent{
-        Path: "/some/path",
-        Body: []byte("1234"),
-        Headers:map[string]interface{}{"first": "string"},
-    }
-
-    // invoke the tested function with the new event and log its output
-    resp, err := tc.Invoke(&testEvent)
-    tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
-}
-```
-
-# Usage, called from another program:
-
-```golang
-package main
-
-import (
-    "github.com/nuclio/nuclio-test-go"
-)
-
-func main() {
-    // data binding for V3IO data containers, optional (use nil instead of &data)
-    data := nutest.DataBind{Name:"db0", Url:"", Container:"x"}
-
-    // Create TestContext and specify the function name, verbose, data
-    tc, err := nutest.NewTestContext(MyHandler, true, &data)
-    if err != nil {
-        panic(err)
-    }
-
-    // Create a new test event
-    testEvent := nutest.TestEvent{
-        Path: "/some/path",
-        Body: []byte("1234"),
-        Headers:map[string]interface{}{"first": "something"},
-    }
-
-    // invoke the tested function with the new event and log its output
-    resp, err := tc.Invoke(&testEvent)
-    tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
-}
-```
diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go
deleted file mode 100644
index 5d9ed9d9..00000000
--- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/event.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2017 The Nuclio Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-*/ - -package nutest - -import ( - "github.com/nuclio/nuclio-sdk-go" - "github.com/pkg/errors" - "time" -) - -type TestEvent struct { - nuclio.AbstractEvent - - Body []byte - ContentType string - id nuclio.ID - emptyByteArray []byte - Headers map[string]interface{} - Path string - URL string - Method string - Time time.Time -} - -var ErrUnsupported = errors.New("Event does not support this interface") - - -func (te *TestEvent) GetContentType() string { - return te.ContentType -} - -func (te *TestEvent) GetBody() []byte { - return te.Body -} - -func (te *TestEvent) GetPath() string { - return te.Path -} - -func (te *TestEvent) GetURL() string { - return te.URL -} - -func (te *TestEvent) GetMethod() string { - return te.Method -} - -func (te *TestEvent) GetHeaders() map[string]interface{} { - return te.Headers -} - -func (te *TestEvent) GetHeader(key string) interface{} { - return te.Headers[key] -} - -func (te *TestEvent) GetHeaderByteSlice(key string) []byte { - value, found := te.Headers[key] - if !found { - return nil - } - - switch typedValue := value.(type) { - case string: - return []byte(typedValue) - case []byte: - return typedValue - default: - return nil - } -} - -func (te *TestEvent) GetHeaderString(key string) string { - return string(te.GetHeaderByteSlice(key)) -} - -func (te *TestEvent) GetTimestamp() time.Time { - return te.Time -} - diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go deleted file mode 100644 index 1cd62731..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/nuclio-test-go/nutest.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nutest - -import ( - "github.com/nuclio/zap" - "github.com/nuclio/nuclio-sdk-go" - "github.com/v3io/v3io-go-http" - "github.com/pkg/errors" - "github.com/nuclio/logger" -) - -func NewTestContext(function func(context *nuclio.Context, event nuclio.Event)(interface {}, error), - verbose bool, data *DataBind) (*TestContext, error) { - newTest := TestContext{Data:data} - if verbose { - newTest.LogLevel = nucliozap.DebugLevel - } else { - newTest.LogLevel = nucliozap.WarnLevel - } - - logger, err := nucliozap.NewNuclioZapCmd("emulator", newTest.LogLevel) - if err != nil { - return nil, errors.Wrap(err, "Failed to create logger") - } - - newTest.Logger = logger - - db := map[string]nuclio.DataBinding{} - if data != nil { - container, err := createContainer(logger, data) - if err != nil { - logger.ErrorWith("Failed to createContainer", "err", err) - return nil, errors.Wrap(err, "Failed to createContainer") - } - - if data.Name == "" { - data.Name = "db0" - } - db[data.Name] = container - } - - newTest.context = nuclio.Context{Logger:logger, DataBinding:db} - newTest.function = function - - - return &newTest, nil -} - -type TestContext struct { - LogLevel nucliozap.Level - Logger logger.Logger - Data *DataBind - context nuclio.Context - function func(context *nuclio.Context, event nuclio.Event)(interface {}, error) -} - -func (tc *TestContext) InitContext(function func(context *nuclio.Context) error) error { - return function(&tc.context) -} - -func (tc *TestContext) Invoke(event nuclio.Event) (interface{}, error) { - - body, err := tc.function(&tc.context, event) - if err != nil { - tc.Logger.ErrorWith("Function execution failed", "err", err) - return body, err - } - tc.Logger.InfoWith("Function completed","output",body) - - return body, err -} - -func createContainer(logger logger.Logger, db *DataBind) (*v3io.Container, error) { - // create context - context, err := v3io.NewContext(logger, db.Url , 8) - if err != nil { - return nil, errors.Wrap(err, "Failed to create client") - } - - // create session - session, err := context.NewSession(db.User, db.Password, "v3test") - if err != nil { - return nil, errors.Wrap(err, "Failed to create session") - } - - // create the container - container, err := session.NewContainer(db.Container) - if err != nil { - return nil, errors.Wrap(err, "Failed to create container") - } - - return container, nil -} - -type DataBind struct { - Name string - Url string - Container string - User string - Password string -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md deleted file mode 100644 index fe510b1a..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/README.md +++ /dev/null @@ -1 +0,0 @@ -# zap diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go deleted file mode 100644 index e9d41e3e..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/buffer.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nucliozap
-
-import (
-    "bytes"
-    "encoding/json"
-    "fmt"
-    "time"
-
-    "github.com/pkg/errors"
-)
-
-var ErrBufferPoolAllocationTimeout = errors.New("Timed out waiting for buffer logger")
-
-// BufferLogger is a logger that outputs the records to a buffer
-type BufferLogger struct {
-    encoding string
-    Logger   *NuclioZap
-    Buffer   *bytes.Buffer
-}
-
-func NewBufferLogger(name string, encoding string, level Level) (*BufferLogger, error) {
-    writer := &bytes.Buffer{}
-
-    // create a logger that is able to capture the output into a buffer. if a request arrives
-    // and the user wishes to capture the log, this will be used as the logger instead of the default
-    // logger
-    newLogger, err := NewNuclioZap(name,
-        encoding,
-        nil,
-        writer,
-        writer,
-        level)
-
-    if err != nil {
-        return nil, errors.Wrap(err, "Failed to create buffer logger")
-    }
-
-    return &BufferLogger{
-        Logger:   newLogger,
-        Buffer:   writer,
-        encoding: encoding,
-    }, nil
-}
-
-func (bl *BufferLogger) GetJSONString() (string, error) {
-    if bl.encoding != "json" {
-        return "", fmt.Errorf("Can only return JSON when encoding is JSON, not %s", bl.encoding)
-    }
-
-    jsonBody := bl.Buffer.Bytes()
-    if len(jsonBody) != 0 {
-
-        // remove last comma
-        jsonBody = jsonBody[:len(jsonBody)-1]
-    }
-
-    return "[" + string(jsonBody) + "]", nil
-}
-
-func (bl *BufferLogger) GetLogEntries() ([]map[string]interface{}, error) {
-    jsonBody, err := bl.GetJSONString()
-    if err != nil {
-        return nil, errors.Wrap(err, "Failed to get JSON string")
-    }
-
-    unmarshalledJSONBody := []map[string]interface{}{}
-
-    err = json.Unmarshal([]byte(jsonBody), &unmarshalledJSONBody)
-    if err != nil {
-        return nil, errors.Wrap(err, "Failed to unmarshal JSON body")
-    }
-
-    return unmarshalledJSONBody, nil
-}
-
-// BufferLoggerPool is a pool of buffer loggers
-type BufferLoggerPool struct {
-    bufferLoggerChan       chan *BufferLogger
-    defaultAllocateTimeout time.Duration
-}
-
-// NewBufferLoggerPool creates a pool of buffer loggers
-func NewBufferLoggerPool(numBufferLoggers int,
-    name string,
-    encoding string,
-    level Level) (*BufferLoggerPool, error) {
-
-    // create a channel for the buffer loggers
-    bufferLoggersChan := make(chan *BufferLogger, numBufferLoggers)
-
-    // create buffer loggers
-    for bufferLoggerIdx := 0; bufferLoggerIdx < numBufferLoggers; bufferLoggerIdx++ {
-        newBufferLogger, err := NewBufferLogger(name, encoding, level)
-        if err != nil {
-            return nil, errors.Wrap(err, "Failed to create buffer logger")
-        }
-
-        // shove to channel
-        bufferLoggersChan <- newBufferLogger
-    }
-
-    return &BufferLoggerPool{
-        bufferLoggerChan:       bufferLoggersChan,
-        defaultAllocateTimeout: 10 * time.Second,
-    }, nil
-}
-
-func (blp *BufferLoggerPool) Allocate(timeout *time.Duration) (*BufferLogger, error) {
-    if timeout == nil {
-        timeout = &blp.defaultAllocateTimeout
-    }
-
-    select {
-    case bufferLogger := <-blp.bufferLoggerChan:
-
-        // clear the buffer
-        bufferLogger.Buffer.Reset()
-
-        return bufferLogger, nil
-    case <-time.After(*timeout):
-        return nil, ErrBufferPoolAllocationTimeout
-    }
-}
-
-func (blp *BufferLoggerPool) Release(bufferLogger *BufferLogger) {
-    blp.bufferLoggerChan <- bufferLogger
-}
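The deleted buffer.go implements its pool as a buffered channel of pre-built loggers: Allocate blocks until one frees up or the timeout fires, and Release pushes it back. A short usage sketch under those assumptions (pool size, logger name, and log fields here are illustrative only):

```golang
package main

import (
	"fmt"
	"time"

	nucliozap "github.com/nuclio/zap"
)

func main() {
	// pool of 4 JSON-encoded buffer loggers at debug level
	pool, err := nucliozap.NewBufferLoggerPool(4, "request", "json", nucliozap.DebugLevel)
	if err != nil {
		panic(err)
	}

	// Allocate blocks until a logger is free or the timeout expires
	timeout := time.Second
	bufferLogger, err := pool.Allocate(&timeout)
	if err != nil {
		panic(err) // would be ErrBufferPoolAllocationTimeout on timeout
	}
	defer pool.Release(bufferLogger) // always return the logger to the pool

	bufferLogger.Logger.InfoWith("Handling request", "path", "/some/path")

	// the captured records can be read back as a JSON array
	entries, err := bufferLogger.GetJSONString()
	if err != nil {
		panic(err)
	}
	fmt.Println(entries)
}
```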
a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go deleted file mode 100644 index 30f172bc..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/logger.go +++ /dev/null @@ -1,458 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nucliozap - -import ( - "context" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/mgutz/ansi" - "github.com/nuclio/logger" - "github.com/pavius/zap" - "github.com/pavius/zap/zapcore" -) - -type EncoderConfigJSON struct { - LineEnding string - VarGroupName string - TimeFieldName string - TimeFieldEncoding string -} - -type EncoderConfigConsole struct { -} - -type EncoderConfig struct { - JSON EncoderConfigJSON - Console EncoderConfigConsole -} - -func NewEncoderConfig() *EncoderConfig { - return &EncoderConfig{ - JSON: EncoderConfigJSON{ - LineEnding: ",", - TimeFieldName: "time", - TimeFieldEncoding: "epoch-millis", - }, - } -} - -// Level is logging levels -type Level int8 - -// Predefined logging levels -const ( - DebugLevel Level = Level(zapcore.DebugLevel) - InfoLevel Level = Level(zapcore.InfoLevel) - WarnLevel Level = Level(zapcore.WarnLevel) - ErrorLevel Level = Level(zapcore.ErrorLevel) - DPanicLevel Level = Level(zapcore.DPanicLevel) - PanicLevel Level = Level(zapcore.PanicLevel) - FatalLevel Level = Level(zapcore.FatalLevel) -) - -type writerWrapper struct { - io.Writer -} - -func (w writerWrapper) Sync() error { - return nil -} - -// NuclioZap is a concrete implementation of the nuclio logger interface, using zap -type NuclioZap struct { - *zap.SugaredLogger - atomicLevel zap.AtomicLevel - coloredLevelDebug string - coloredLevelInfo string - coloredLevelWarn string - coloredLevelError string - colorLoggerName func(string) string - customEncoderConfig *EncoderConfig - encoding string -} - -// NewNuclioZap create a configurable logger -func NewNuclioZap(name string, - encoding string, - customEncoderConfig *EncoderConfig, - sink io.Writer, - errSink io.Writer, - level Level) (*NuclioZap, error) { - newNuclioZap := &NuclioZap{ - atomicLevel: zap.NewAtomicLevelAt(zapcore.Level(level)), - customEncoderConfig: customEncoderConfig, - encoding: encoding, - } - - if customEncoderConfig == nil { - customEncoderConfig = NewEncoderConfig() - } - - // create an encoder configuration - encoderConfig := newNuclioZap.getEncoderConfig(encoding, customEncoderConfig) - - // create a sane configuration - config := zap.Config{ - Level: newNuclioZap.atomicLevel, - Development: true, - Encoding: encoding, - EncoderConfig: *encoderConfig, - OutputWriters: []zapcore.WriteSyncer{writerWrapper{sink}}, - ErrorOutputWriters: []zapcore.WriteSyncer{writerWrapper{errSink}}, - DisableStacktrace: true, - } - - newZapLogger, err := config.Build() - if err != nil { - return nil, err - } - - newNuclioZap.SugaredLogger = newZapLogger.Sugar().Named(name) - - // initialize 
coloring by level - newNuclioZap.initializeColors() - - return newNuclioZap, nil -} - -// We use this istead of testing.Verbose since we don't want to get testing flags in our code -func isVerboseTesting() bool { - for _, arg := range os.Args { - if arg == "-test.v=true" || arg == "-test.v" { - return true - } - } - return false -} - -// NewNuclioZapTest creates a logger pre-configured for tests -func NewNuclioZapTest(name string) (*NuclioZap, error) { - var loggerLevel Level - - if isVerboseTesting() { - loggerLevel = DebugLevel - } else { - loggerLevel = InfoLevel - } - - return NewNuclioZapCmd(name, loggerLevel) -} - -// NewNuclioZapCmd creates a logger pre-configured for commands -func NewNuclioZapCmd(name string, level Level) (*NuclioZap, error) { - return NewNuclioZap(name, "console", nil, os.Stdout, os.Stdout, level) -} - -// GetLevelByName return logging level by name -func GetLevelByName(levelName string) Level { - switch levelName { - case "info": - return Level(zapcore.InfoLevel) - case "warn": - return Level(zapcore.WarnLevel) - case "error": - return Level(zapcore.ErrorLevel) - case "dpanic": - return Level(zapcore.DPanicLevel) - case "panic": - return Level(zapcore.PanicLevel) - case "fatal": - return Level(zapcore.FatalLevel) - default: - return Level(zapcore.DebugLevel) - } -} - -// SetLevel sets the logging level -func (nz *NuclioZap) SetLevel(level Level) { - nz.atomicLevel.SetLevel(zapcore.Level(level)) -} - -// GetLevel returns the current logging level -func (nz *NuclioZap) GetLevel() Level { - return Level(nz.atomicLevel.Level()) -} - -// Errors emits error level log -func (nz *NuclioZap) Error(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Errorf(formatString, vars...) - } else { - nz.SugaredLogger.Error(format) - } -} - -// ErrorCtx emits an unstructured debug log with context -func (nz *NuclioZap) ErrorCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Errorw(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// ErrorWith emits error level log with arguments -func (nz *NuclioZap) ErrorWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Errorw(format.(string), vars...) -} - -// ErrorWithCtx emits debug level log with arguments -func (nz *NuclioZap) ErrorWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Errorw(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) -} - -// Warn emits warn level log -func (nz *NuclioZap) Warn(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Warnf(formatString, vars...) - } else { - nz.SugaredLogger.Warn(format) - } -} - -// WarnCtx emits an unstructured debug log with context -func (nz *NuclioZap) WarnCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Warnw(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// WarnWith emits warn level log with arguments -func (nz *NuclioZap) WarnWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Warnw(format.(string), vars...) -} - -// WarnWithCtx emits debug level log with arguments -func (nz *NuclioZap) WarnWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Warnw(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) 
-} - -// Info emits info level log -func (nz *NuclioZap) Info(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Infof(formatString, vars...) - } else { - nz.SugaredLogger.Info(format) - } -} - -// InfoCtx emits an unstructured debug log with context -func (nz *NuclioZap) InfoCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Infow(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// InfoWith emits info level log with arguments -func (nz *NuclioZap) InfoWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Infow(format.(string), nz.prepareVars(vars)...) -} - -// InfoWithCtx emits debug level log with arguments -func (nz *NuclioZap) InfoWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Infow(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) -} - -// Debug emits debug level log -func (nz *NuclioZap) Debug(format interface{}, vars ...interface{}) { - formatString, formatIsString := format.(string) - if formatIsString { - nz.SugaredLogger.Debugf(formatString, vars...) - } else { - nz.SugaredLogger.Debug(format) - } -} - -// DebugCtx emits an unstructured debug log with context -func (nz *NuclioZap) DebugCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Debugw(nz.getFormatWithContext(ctx, format), nz.prepareVars(vars)...) -} - -// DebugWith emits debug level log with arguments -func (nz *NuclioZap) DebugWith(format interface{}, vars ...interface{}) { - nz.SugaredLogger.Debugw(format.(string), nz.prepareVars(vars)...) -} - -// DebugWithCtx emits debug level log with arguments -func (nz *NuclioZap) DebugWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - nz.SugaredLogger.Debugw(format.(string), nz.addContextToVars(ctx, nz.prepareVars(vars))...) 
-} - -// Flush flushes the log -func (nz *NuclioZap) Flush() { - nz.Sync() -} - -// GetChild returned a named child logger -func (nz *NuclioZap) GetChild(name string) logger.Logger { - return &NuclioZap{ - SugaredLogger: nz.Named(name), - encoding: nz.encoding, - customEncoderConfig: nz.customEncoderConfig, - } -} - -func (nz *NuclioZap) encodeLoggerName(loggerName string, enc zapcore.PrimitiveArrayEncoder) { - const maxLoggerNameLength = 25 - actualLoggerNameLength := len(loggerName) - var encodedLoggerName string - - if actualLoggerNameLength >= maxLoggerNameLength { - encodedLoggerName = loggerName[actualLoggerNameLength-maxLoggerNameLength:] - - } else { - encodedLoggerName = strings.Repeat(" ", maxLoggerNameLength-actualLoggerNameLength) + loggerName - } - - // just truncate - enc.AppendString(nz.colorLoggerName(encodedLoggerName)) -} - -func (nz *NuclioZap) encodeStdoutLevel(level zapcore.Level, enc zapcore.PrimitiveArrayEncoder) { - switch level { - case zapcore.InfoLevel: - enc.AppendString(nz.coloredLevelInfo) - return - case zapcore.WarnLevel: - enc.AppendString(nz.coloredLevelWarn) - return - case zapcore.ErrorLevel: - enc.AppendString(nz.coloredLevelError) - return - } - - enc.AppendString(nz.coloredLevelDebug) -} - -func (nz *NuclioZap) encodeStdoutTime(t time.Time, enc zapcore.PrimitiveArrayEncoder) { - enc.AppendString(t.Format("06.01.02 15:04:05.000")) -} - -func (nz *NuclioZap) initializeColors() { - nz.coloredLevelDebug = ansi.Color("(D)", "green") - nz.coloredLevelInfo = ansi.Color("(I)", "blue") - nz.coloredLevelWarn = ansi.Color("(W)", "yellow") - nz.coloredLevelError = ansi.Color("(E)", "red") - - nz.colorLoggerName = ansi.ColorFunc("white") -} - -func (nz *NuclioZap) getEncoderConfig(encoding string, encoderConfig *EncoderConfig) *zapcore.EncoderConfig { - if encoding == "console" { - return &zapcore.EncoderConfig{ - TimeKey: "time", - LevelKey: "level", - NameKey: "name", - CallerKey: "", - MessageKey: "message", - StacktraceKey: "stack", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: nz.encodeStdoutLevel, - EncodeTime: nz.encodeStdoutTime, - EncodeDuration: zapcore.StringDurationEncoder, - EncodeCaller: func(zapcore.EntryCaller, zapcore.PrimitiveArrayEncoder) {}, - EncodeLoggerName: nz.encodeLoggerName, - } - } - - var timeEncoder zapcore.TimeEncoder - switch encoderConfig.JSON.TimeFieldEncoding { - case "iso8601": - timeEncoder = zapcore.ISO8601TimeEncoder - default: - timeEncoder = zapcore.EpochMillisTimeEncoder - } - - return &zapcore.EncoderConfig{ - TimeKey: encoderConfig.JSON.TimeFieldName, - LevelKey: "level", - NameKey: "name", - CallerKey: "", - MessageKey: "message", - StacktraceKey: "stack", - LineEnding: encoderConfig.JSON.LineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: timeEncoder, - EncodeDuration: zapcore.SecondsDurationEncoder, - EncodeCaller: func(zapcore.EntryCaller, zapcore.PrimitiveArrayEncoder) {}, - EncodeLoggerName: zapcore.FullLoggerNameEncoder, - } -} - -func (nz *NuclioZap) addContextToVars(ctx context.Context, vars []interface{}) []interface{} { - if ctx == nil { - return vars - } - - // get request ID from context - requestID := ctx.Value("RequestID") - - // if not set, don't add it to vars - if requestID == nil || requestID == "" { - return vars - } - - // create a slice 2 slots larger - varsWithContext := make([]interface{}, 0, len(vars)+2) - varsWithContext = append(varsWithContext, "requestID") - varsWithContext = append(varsWithContext, requestID) - varsWithContext = append(varsWithContext, vars...) 
- - return varsWithContext -} - -func (nz *NuclioZap) getFormatWithContext(ctx context.Context, format interface{}) string { - formatString := format.(string) - - // get request ID from context - requestID := ctx.Value("RequestID") - - // if not set, don't add it to vars - if requestID == nil || requestID == "" { - return formatString - } - - return formatString + fmt.Sprintf(" (requestID: %s)", requestID) -} - -func (nz *NuclioZap) prepareVars(vars []interface{}) []interface{} { - if nz.encoding != "json" || nz.customEncoderConfig == nil || nz.customEncoderConfig.JSON.VarGroupName == "" { - return vars - } - - // must be an even number of parameters - if len(vars)&0x1 != 0 { - panic("Odd number of logging vars - must be key/value") - } - - formattedVars := "" - - // create key=value pairs - for varIndex := 0; varIndex < len(vars); varIndex += 2 { - formattedVars += fmt.Sprintf("%s=%+v || ", vars[varIndex], vars[varIndex+1]) - } - - // if nothing was created, don't generate a group - if len(formattedVars) == 0 { - return []interface{}{} - } - - return []interface{}{ - nz.customEncoderConfig.JSON.VarGroupName, - formattedVars[:len(formattedVars)-4], - } -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go deleted file mode 100644 index 2747b416..00000000 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/nuclio/zap/mux.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2017 The Nuclio Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nucliozap - -import ( - "context" - - "github.com/nuclio/logger" -) - -// a logger that multiplexes logs towards multiple loggers -type MuxLogger struct { - loggers []logger.Logger -} - -func NewMuxLogger(loggers ...logger.Logger) (*MuxLogger, error) { - return &MuxLogger{loggers: loggers}, nil -} - -func (ml *MuxLogger) SetLoggers(loggers ...logger.Logger) { - ml.loggers = loggers -} - -func (ml *MuxLogger) Error(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Error(format, vars...) - } -} - -func (ml *MuxLogger) ErrorCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.ErrorCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) Warn(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Warn(format, vars...) - } -} - -func (ml *MuxLogger) WarnCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.WarnCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) Info(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Info(format, vars...) - } -} - -func (ml *MuxLogger) InfoCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.InfoCtx(ctx, format, vars...) 
- } -} - -func (ml *MuxLogger) Debug(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.Debug(format, vars...) - } -} - -func (ml *MuxLogger) DebugCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.DebugCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) ErrorWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.ErrorWith(format, vars...) - } -} - -func (ml *MuxLogger) ErrorWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.ErrorWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) WarnWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.WarnWith(format, vars...) - } -} - -func (ml *MuxLogger) WarnWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.WarnWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) InfoWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.InfoWith(format, vars...) - } -} - -func (ml *MuxLogger) InfoWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.InfoWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) DebugWith(format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.DebugWith(format, vars...) - } -} - -func (ml *MuxLogger) DebugWithCtx(ctx context.Context, format interface{}, vars ...interface{}) { - for _, logger := range ml.loggers { - logger.DebugWithCtx(ctx, format, vars...) - } -} - -func (ml *MuxLogger) Flush() { -} - -func (ml *MuxLogger) GetChild(name string) logger.Logger { - return ml -} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go index 05157edf..43c9a03d 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go @@ -18,7 +18,6 @@ package v3io // A container interface allows perform actions against a container type Container interface { - // // Container // @@ -77,7 +76,7 @@ type Container interface { PutItem(*PutItemInput, interface{}, chan *Response) (*Request, error) // PutItemSync - PutItemSync(*PutItemInput) error + PutItemSync(*PutItemInput) (*Response, error) // PutItems PutItems(*PutItemsInput, interface{}, chan *Response) (*Request, error) @@ -89,7 +88,7 @@ type Container interface { UpdateItem(*UpdateItemInput, interface{}, chan *Response) (*Request, error) // UpdateItemSync - UpdateItemSync(*UpdateItemInput) error + UpdateItemSync(*UpdateItemInput) (*Response, error) // // Stream diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go index 5a9455df..986ac8b1 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go @@ -16,9 +16,14 @@ limitations under the License. 
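// NOTE: the container.go hunk above changes the synchronous item calls from
// returning a bare error to returning (*v3io.Response, error). A minimal,
// hypothetical caller sketch under the new signatures (the container value,
// table path, and attribute names below are illustrative, not part of this
// patch):
//
//	response, err := container.PutItemSync(&v3io.PutItemInput{
//		Path:       "mytable/key1",
//		Attributes: map[string]interface{}{"value": 42},
//	})
//	if err != nil {
//		return err
//	}
//	// the response output now carries the item's server-side modification
//	// time (see the PutItemOutput addition to types.go further below)
//	output := response.Output.(*v3io.PutItemOutput)
//	_, _ = output.MtimeSecs, output.MtimeNSecs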
package v3io +import "time" + type Context interface { Container // create a new session NewSession(*NewSessionInput) (Session, error) + + // stops a context + Stop(*time.Duration) error } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go index 941029bf..f272ce38 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go @@ -67,7 +67,7 @@ func (c *container) PutItem(putItemInput *v3io.PutItemInput, } // PutItemSync -func (c *container) PutItemSync(putItemInput *v3io.PutItemInput) error { +func (c *container) PutItemSync(putItemInput *v3io.PutItemInput) (*v3io.Response, error) { c.populateInputFields(&putItemInput.DataPlaneInput) return c.session.context.PutItemSync(putItemInput) } @@ -95,7 +95,7 @@ func (c *container) UpdateItem(updateItemInput *v3io.UpdateItemInput, } // UpdateItemSync -func (c *container) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { +func (c *container) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) (*v3io.Response, error) { c.populateInputFields(&updateItemInput.DataPlaneInput) return c.session.context.UpdateItemSync(updateItemInput) } diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go index 90d50cea..f475d354 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go @@ -32,12 +32,24 @@ import ( // TODO: Request should have a global pool var requestID uint64 +var ErrContextStopped = errors.New("Context stopped") + +type inactivityMonitorRequest int + +const ( + inactivityMonitorRequestReset inactivityMonitorRequest = 0 + inactivityMonitorRequestStop inactivityMonitorRequest = 1 +) + type context struct { - logger logger.Logger - requestChan chan *v3io.Request - httpClient *fasthttp.Client - clusterEndpoints []string - numWorkers int + logger logger.Logger + requestChan chan *v3io.Request + httpClient *fasthttp.Client + clusterEndpoints []string + numWorkers int + inactivityMonitorTimer *time.Timer + inactivityMonitorChan chan inactivityMonitorRequest + inactivityMonitorTimeout time.Duration } func NewClient(tlsConfig *tls.Config, dialTimeout time.Duration) *fasthttp.Client { @@ -48,6 +60,7 @@ func NewClient(tlsConfig *tls.Config, dialTimeout time.Duration) *fasthttp.Clien if dialTimeout == 0 { dialTimeout = fasthttp.DefaultDialTimeout } + dialFunction := func(addr string) (net.Conn, error) { return fasthttp.DialTimeout(addr, dialTimeout) } @@ -74,19 +87,40 @@ func NewContext(parentLogger logger.Logger, client *fasthttp.Client, newContextI } newContext := &context{ - logger: parentLogger.GetChild("context.http"), - httpClient: client, - requestChan: make(chan *v3io.Request, requestChanLen), - numWorkers: numWorkers, + logger: parentLogger.GetChild("context.http"), + httpClient: client, + requestChan: make(chan *v3io.Request, requestChanLen), + numWorkers: numWorkers, + inactivityMonitorTimeout: 
newContextInput.InactivityTimeout, } for workerIndex := 0; workerIndex < numWorkers; workerIndex++ { go newContext.workerEntry(workerIndex) } + if newContext.inactivityMonitorTimeout != 0 { + newContext.inactivityMonitorChan = make(chan inactivityMonitorRequest, newContext.numWorkers) + newContext.inactivityMonitorTimer = time.NewTimer(newContext.inactivityMonitorTimeout) + + go newContext.inactivityMonitorEntry() + } + + newContext.logger.DebugWith("Created context", + "numWorkers", numWorkers, + "inactivityMonitorTimeout", newContextInput.InactivityTimeout) + return newContext, nil } +// stops a context +func (c *context) Stop(timeout *time.Duration) error { + if c.inactivityMonitorTimer != nil { + c.inactivityMonitorChan <- inactivityMonitorRequestStop + } + + return c.stop("User requested stop", timeout) +} + // create a new session func (c *context) NewSession(newSessionInput *v3io.NewSessionInput) (v3io.Session, error) { return newSession(c.logger, @@ -308,7 +342,7 @@ func (c *context) PutItem(putItemInput *v3io.PutItemInput, } // PutItemSync -func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) error { +func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) (*v3io.Response, error) { var body map[string]interface{} if putItemInput.UpdateMode != "" { body = map[string]interface{}{ @@ -317,7 +351,7 @@ func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) error { } // prepare the query path - _, err := c.putItem(&putItemInput.DataPlaneInput, + response, err := c.putItem(&putItemInput.DataPlaneInput, putItemInput.Path, putItemFunctionName, putItemInput.Attributes, @@ -325,7 +359,13 @@ func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) error { putItemHeaders, body) - return err + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.PutItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + + return response, err } // PutItems @@ -386,8 +426,9 @@ func (c *context) UpdateItem(updateItemInput *v3io.UpdateItemInput, } // UpdateItemSync -func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { +func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) (*v3io.Response, error) { var err error + var response *v3io.Response if updateItemInput.Attributes != nil { @@ -400,7 +441,7 @@ func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { body["UpdateMode"] = updateItemInput.UpdateMode } - _, err = c.putItem(&updateItemInput.DataPlaneInput, + response, err = c.putItem(&updateItemInput.DataPlaneInput, updateItemInput.Path, putItemFunctionName, updateItemInput.Attributes, @@ -408,18 +449,31 @@ func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { putItemHeaders, body) + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.UpdateItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + } else if updateItemInput.Expression != nil { - _, err = c.updateItemWithExpression(&updateItemInput.DataPlaneInput, + response, err = c.updateItemWithExpression(&updateItemInput.DataPlaneInput, updateItemInput.Path, updateItemFunctionName, *updateItemInput.Expression, updateItemInput.Condition, updateItemHeaders, updateItemInput.UpdateMode) + + mtimeSecs, mtimeNSecs, err := parseMtimeHeader(response) + if err != nil { + return nil, err + } + response.Output = &v3io.UpdateItemOutput{MtimeSecs: mtimeSecs, MtimeNSecs: mtimeNSecs} + } - return err + return 
response, err } // GetObject @@ -764,7 +818,6 @@ func (c *context) updateItemWithExpression(dataPlaneInput *v3io.DataPlaneInput, body["UpdateMode"] = updateMode } - if condition != "" { body["ConditionExpression"] = condition } @@ -822,6 +875,11 @@ func (c *context) sendRequest(dataPlaneInput *v3io.DataPlaneInput, var statusCode int var err error + // if there's an inactivity timer, reset it + if c.inactivityMonitorTimer != nil { + c.inactivityMonitorChan <- inactivityMonitorRequestReset + } + if dataPlaneInput.ContainerName == "" { return nil, errors.New("ContainerName must not be empty") } @@ -919,7 +977,7 @@ func (c *context) buildRequestURI(urlString string, containerName string, query if strings.HasSuffix(pathStr, "/") { uri.Path += "/" // retain trailing slash } - uri.RawQuery = strings.Replace(query, " ", "%20", -1) + uri.RawQuery = strings.ReplaceAll(query, " ", "%20") return uri, nil } @@ -1071,63 +1129,79 @@ func (c *context) sendRequestToWorker(input interface{}, func (c *context) workerEntry(workerIndex int) { for { - var response *v3io.Response - var err error - - // read a request request := <-c.requestChan - // according to the input type - switch typedInput := request.Input.(type) { - case *v3io.PutObjectInput: - err = c.PutObjectSync(typedInput) - case *v3io.GetObjectInput: - response, err = c.GetObjectSync(typedInput) - case *v3io.DeleteObjectInput: - err = c.DeleteObjectSync(typedInput) - case *v3io.GetItemInput: - response, err = c.GetItemSync(typedInput) - case *v3io.GetItemsInput: - response, err = c.GetItemsSync(typedInput) - case *v3io.PutItemInput: - err = c.PutItemSync(typedInput) - case *v3io.PutItemsInput: - response, err = c.PutItemsSync(typedInput) - case *v3io.UpdateItemInput: - err = c.UpdateItemSync(typedInput) - case *v3io.CreateStreamInput: - err = c.CreateStreamSync(typedInput) - case *v3io.DeleteStreamInput: - err = c.DeleteStreamSync(typedInput) - case *v3io.GetRecordsInput: - response, err = c.GetRecordsSync(typedInput) - case *v3io.PutRecordsInput: - response, err = c.PutRecordsSync(typedInput) - case *v3io.SeekShardInput: - response, err = c.SeekShardSync(typedInput) - case *v3io.GetContainersInput: - response, err = c.GetContainersSync(typedInput) - case *v3io.GetContainerContentsInput: - response, err = c.GetContainerContentsSync(typedInput) - default: - c.logger.ErrorWith("Got unexpected request type", "type", reflect.TypeOf(request.Input).String()) + if err := c.handleRequest(workerIndex, request); err != nil { + if err == ErrContextStopped { + return + } } + } +} - // TODO: have the sync interfaces somehow use the pre-allocated response - if response != nil { - request.RequestResponse.Response = *response +func (c *context) handleRequest(workerIndex int, request *v3io.Request) error { + var response *v3io.Response + var err error + + // according to the input type + switch typedInput := request.Input.(type) { + case *v3io.PutObjectInput: + err = c.PutObjectSync(typedInput) + case *v3io.GetObjectInput: + response, err = c.GetObjectSync(typedInput) + case *v3io.DeleteObjectInput: + err = c.DeleteObjectSync(typedInput) + case *v3io.GetItemInput: + response, err = c.GetItemSync(typedInput) + case *v3io.GetItemsInput: + response, err = c.GetItemsSync(typedInput) + case *v3io.PutItemInput: + response, err = c.PutItemSync(typedInput) + case *v3io.PutItemsInput: + response, err = c.PutItemsSync(typedInput) + case *v3io.UpdateItemInput: + response, err = c.UpdateItemSync(typedInput) + case *v3io.CreateStreamInput: + err = c.CreateStreamSync(typedInput) 
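	// NOTE: with the dispatch above, PutItemSync and UpdateItemSync now yield
	// a *v3io.Response rather than only an error, so asynchronous
	// PutItem/UpdateItem callers receive a populated response, including the
	// Mtime output parsed from the X-v3io-transaction-verifier header, on
	// their response channel. A hypothetical usage sketch (input values and
	// channel name are illustrative, not part of this patch):
	//
	//	responseChan := make(chan *v3io.Response, 1)
	//	_, err := container.UpdateItem(&v3io.UpdateItemInput{
	//		Path:       "mytable/key1",
	//		Attributes: map[string]interface{}{"value": 43},
	//	}, nil, responseChan)
	//	if err != nil {
	//		return err
	//	}
	//	response := <-responseChan
	//	// for async calls, response.Error carries any server-side failure
	//	if output, ok := response.Output.(*v3io.UpdateItemOutput); ok {
	//		_, _ = output.MtimeSecs, output.MtimeNSecs
	//	}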
+ case *v3io.DeleteStreamInput: + err = c.DeleteStreamSync(typedInput) + case *v3io.GetRecordsInput: + response, err = c.GetRecordsSync(typedInput) + case *v3io.PutRecordsInput: + response, err = c.PutRecordsSync(typedInput) + case *v3io.SeekShardInput: + response, err = c.SeekShardSync(typedInput) + case *v3io.GetContainersInput: + response, err = c.GetContainersSync(typedInput) + case *v3io.GetContainerContentsInput: + response, err = c.GetContainerContentsSync(typedInput) + case *v3io.StopContextInput: + response = &v3io.Response{ + Output: &v3io.StopContextOutput{ + WorkerIndex: workerIndex, + }, } + err = ErrContextStopped + default: + c.logger.ErrorWith("Got unexpected request type", "type", reflect.TypeOf(request.Input).String()) + } - // TODO: have the sync interfaces somehow use the pre-allocated response - if response != nil { - request.RequestResponse.Response = *response + // TODO: have the sync interfaces somehow use the pre-allocated response + if response != nil { + request.RequestResponse.Response = *response } - response = &request.RequestResponse.Response + response = &request.RequestResponse.Response - response.ID = request.ID - response.Error = err - response.RequestResponse = request.RequestResponse - response.Context = request.Context + response.ID = request.ID + response.Error = err + response.RequestResponse = request.RequestResponse + response.Context = request.Context - // write to response channel - request.ResponseChan <- &request.RequestResponse.Response - } + // write to response channel + request.ResponseChan <- &request.RequestResponse.Response + + return err } func readAllCapnpMessages(reader io.Reader) []*capnp.Message { @@ -1204,7 +1278,7 @@ func decodeCapnpAttributes(keyValues node_common_capnp.VnObjectItemsGetMappedKey func (c *context) getItemsParseJSONResponse(response *v3io.Response, getItemsInput *v3io.GetItemsInput) (*v3io.GetItemsOutput, error) { getItemsResponse := struct { - Items []map[string]map[string]interface{} + Items []map[string]map[string]interface{} NextMarker string LastItemIncluded string }{} @@ -1345,3 +1419,108 @@ func (c *context) getItemsParseCAPNPResponse(response *v3io.Response, withWildca } return &getItemsOutput, nil } + +func (c *context) inactivityMonitorEntry() { + c.logger.DebugWith("Inactivity monitor starting", + "timeout", c.inactivityMonitorTimeout) + + inactivityMonitorTimerExpired := false + + for !inactivityMonitorTimerExpired { + select { + case request := <-c.inactivityMonitorChan: + switch request { + case inactivityMonitorRequestStop: + c.logger.Debug("Inactivity monitor requested to stop") + return + case inactivityMonitorRequestReset: + c.inactivityMonitorTimer.Reset(c.inactivityMonitorTimeout) + } + + case <-c.inactivityMonitorTimer.C: + inactivityMonitorTimerExpired = true + } + } + + // force stop + c.stop("Inactivity timeout expired", nil) // nolint: errcheck +} + +func (c *context) stop(reason string, timeout *time.Duration) error { + var workerStoppedChan chan *v3io.Response + + timeoutStr := "None" + if timeout != nil { + timeoutStr = timeout.String() + } + + c.logger.DebugWith("Stopping context", + "reason", reason, + "timeout", timeoutStr) + + workerStoppedChan = make(chan *v3io.Response, c.numWorkers) + + // it's guaranteed that a single worker will not read two messages from the queue, so + // each worker should receive a single stop request + for workerIdx := 0; workerIdx < c.numWorkers; workerIdx++ { + _, err := c.sendRequestToWorker(&v3io.StopContextInput{Reason: reason}, + nil, + workerStoppedChan) + + if err != nil { + return errors.Wrap(err, "Failed to send request to worker") + } + } + + // if timeout is set, wait for all workers
to stop + if timeout != nil { + deadline := time.After(*timeout) + workersStopped := 0 + + // while not all workers stopped, wait for them to stop + for workersStopped < c.numWorkers { + select { + case <-workerStoppedChan: + workersStopped++ + case <-deadline: + return errors.New("Timed out waiting for context to stop") + } + } + } + + c.logger.DebugWith("Context stopped") + + return nil +} + +// parsing the mtime from a header of the form `__mtime_secs==1581605100 and __mtime_nsecs==498349956` +func parseMtimeHeader(response *v3io.Response) (int, int, error) { + var mtimeSecs, mtimeNSecs int + var err error + + mtimeHeader := string(response.HeaderPeek("X-v3io-transaction-verifier")) + for _, expression := range strings.Split(mtimeHeader, "and") { + mtimeParts := strings.Split(expression, "==") + mtimeType := strings.TrimSpace(mtimeParts[0]) + if mtimeType == "__mtime_secs" { + mtimeSecs, err = trimAndParseInt(mtimeParts[1]) + if err != nil { + return 0, 0, err + } + } else if mtimeType == "__mtime_nsecs" { + mtimeNSecs, err = trimAndParseInt(mtimeParts[1]) + if err != nil { + return 0, 0, err + } + } else { + return 0, 0, fmt.Errorf("failed to parse 'X-v3io-transaction-verifier', unexpected symbol '%v' ", mtimeType) + } + } + + return mtimeSecs, mtimeNSecs, nil +} + +func trimAndParseInt(str string) (int, error) { + trimmed := strings.TrimSpace(str) + return strconv.Atoi(trimmed) +} diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go index 56662277..599000d5 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go @@ -31,9 +31,18 @@ import ( // type NewContextInput struct { - Client *fasthttp.Client - NumWorkers int - RequestChanLen int + Client *fasthttp.Client + NumWorkers int + RequestChanLen int + InactivityTimeout time.Duration +} + +type StopContextInput struct { + Reason string +} + +type StopContextOutput struct { + WorkerIndex int } type NewSessionInput struct { @@ -192,6 +201,12 @@ type PutItemInput struct { UpdateMode string } +type PutItemOutput struct { + DataPlaneInput + MtimeSecs int + MtimeNSecs int +} + type PutItemsInput struct { DataPlaneInput Path string @@ -214,6 +229,12 @@ type UpdateItemInput struct { UpdateMode string } +type UpdateItemOutput struct { + DataPlaneInput + MtimeSecs int + MtimeNSecs int +} + type GetItemInput struct { DataPlaneInput Path string diff --git a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt index 8020c644..fdafad5e 100644 --- a/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt +++ b/functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/modules.txt @@ -65,7 +65,7 @@ github.com/stretchr/testify/require # github.com/v3io/frames v0.6.8-v0.9.11 github.com/v3io/frames github.com/v3io/frames/pb -# github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 +# github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d github.com/v3io/v3io-go/pkg/dataplane github.com/v3io/v3io-go/pkg/errors github.com/v3io/v3io-go/pkg/dataplane/http From 2d1731abf68f08cc44501cdbcff80114e110774a Mon Sep 17 00:00:00 2001 From: iguazio-deploy Date: Mon, 24 Feb 2020 17:22:53 +0000 Subject: [PATCH 02/27] Updated TSDB to 
v0.9.15 --- .../github.com/v3io/v3io-tsdb/Jenkinsfile | 310 ++--- .../vendor/github.com/v3io/v3io-tsdb/Makefile | 22 +- .../v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go | 1 + .../vendor/github.com/v3io/v3io-tsdb/go.mod | 12 +- .../vendor/github.com/v3io/v3io-tsdb/go.sum | 31 +- .../internal/pkg/performance/metrics.go | 6 +- .../v3io/v3io-tsdb/pkg/aggregate/aggregate.go | 7 +- .../v3io/v3io-tsdb/pkg/aggregate/iterator.go | 50 +- .../v3io/v3io-tsdb/pkg/appender/appender.go | 49 +- .../v3io/v3io-tsdb/pkg/appender/ingest.go | 20 +- .../v3io/v3io-tsdb/pkg/appender/store.go | 17 +- .../v3io/v3io-tsdb/pkg/chunkenc/vartype.go | 49 +- .../v3io/v3io-tsdb/pkg/chunkenc/xor.go | 13 +- .../v3io/v3io-tsdb/pkg/config/config.go | 33 +- .../v3io-tsdb/pkg/formatter/formatters.go | 4 +- .../v3io/v3io-tsdb/pkg/formatter/type.go | 10 +- .../v3io/v3io-tsdb/pkg/partmgr/partmgr.go | 120 +- .../v3io-tsdb/pkg/pquerier/chunkIterator.go | 5 +- .../pkg/pquerier/chunkIterator_test.go | 19 +- .../v3io/v3io-tsdb/pkg/pquerier/collector.go | 13 +- .../v3io/v3io-tsdb/pkg/pquerier/frames.go | 99 +- .../client_aggregates_integration_test.go | 29 +- ...oss_series_aggregation_integration_test.go | 27 +- .../dataframe_query_integration_test.go | 573 ++++++++- .../downsample_query_integration_test.go | 4 +- .../integration_test_basic_test.go | 18 + .../query_sql_integration_test.go | 6 +- .../raw_query_integration_test.go | 40 +- .../server_aggregates_integration_test.go | 15 +- .../windowed_aggregation_integration_test.go | 15 +- .../v3io/v3io-tsdb/pkg/pquerier/querier.go | 2 +- .../v3io/v3io-tsdb/pkg/pquerier/select.go | 19 +- .../pkg/pquerier/selectQueryContext_test.go | 2 +- .../v3io/v3io-tsdb/pkg/pquerier/sql_parser.go | 5 +- .../v3io/v3io-tsdb/pkg/querier/series.go | 10 +- .../v3io/v3io-tsdb/pkg/querier/seriesset.go | 4 +- .../pkg/tsdb/delete_integration_test.go | 1141 ----------------- .../v3io/v3io-tsdb/pkg/tsdb/schema/schema.go | 16 +- .../v3io-tsdb/pkg/tsdb/tsdbtest/config.go | 2 + .../v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go | 114 +- .../v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go | 571 +-------- .../pkg/tsdb/v3iotsdb_integration_test.go | 365 +++++- .../v3io/v3io-tsdb/pkg/tsdbctl/add.go | 12 +- .../v3io/v3io-tsdb/pkg/tsdbctl/check.go | 59 +- .../v3io/v3io-tsdb/pkg/tsdbctl/delete.go | 40 +- .../v3io/v3io-tsdb/pkg/tsdbctl/query.go | 7 +- .../v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl.go | 13 +- .../v3io-tsdb/pkg/tsdbctl/tsdbctl_test.go | 14 +- .../v3io/v3io-tsdb/pkg/utils/asynciter.go | 13 +- .../v3io/v3io-tsdb/pkg/utils/container.go | 19 +- .../v3io/v3io-tsdb/pkg/utils/labels.go | 22 +- .../v3io/v3io-tsdb/pkg/utils/misc.go | 23 +- .../v3io/v3io-tsdb/pkg/utils/timeutils.go | 4 +- .../v3io/v3io-tsdb/pkg/utils/validators.go | 8 +- .../BenchmarkIngestWithNuclio_test.go | 2 +- .../test/benchmark/BenchmarkIngest_test.go | 14 +- .../vendor/github.com/v3io/frames/.gitignore | 150 +++ .../vendor/github.com/v3io/frames/Jenkinsfile | 12 +- .../vendor/github.com/v3io/frames/Makefile | 17 +- .../vendor/github.com/v3io/frames/README.md | 548 +++++--- .../vendor/github.com/v3io/frames/builder.go | 9 + .../vendor/github.com/v3io/frames/config.go | 56 +- .../vendor/github.com/v3io/frames/frame.go | 4 + .../github.com/v3io/frames/frames.proto | 14 +- .../vendor/github.com/v3io/frames/go.mod | 16 +- .../vendor/github.com/v3io/frames/go.sum | 25 +- .../github.com/v3io/frames/pb/frames.pb.go | 339 ++--- .../github.com/v3io/frames/pb/methods.go | 21 - .../vendor/github.com/v3io/frames/types.go | 2 + .../v3io/v3io-go/pkg/dataplane/container.go | 5 +- 
.../v3io/v3io-go/pkg/dataplane/context.go | 5 - .../v3io-go/pkg/dataplane/http/container.go | 4 +- .../v3io-go/pkg/dataplane/http/context.go | 313 +---- .../v3io/v3io-go/pkg/dataplane/types.go | 27 +- .../v3io/v3io-tsdb/vendor/modules.txt | 4 +- .../github.com/v3io/v3io-tsdb/Jenkinsfile | 310 ++--- .../vendor/github.com/v3io/v3io-tsdb/Makefile | 22 +- .../v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go | 1 + .../vendor/github.com/v3io/v3io-tsdb/go.mod | 13 +- .../vendor/github.com/v3io/v3io-tsdb/go.sum | 31 +- .../internal/pkg/performance/metrics.go | 6 +- .../v3io/v3io-tsdb/pkg/aggregate/aggregate.go | 7 +- .../v3io/v3io-tsdb/pkg/aggregate/iterator.go | 50 +- .../v3io/v3io-tsdb/pkg/appender/appender.go | 49 +- .../v3io/v3io-tsdb/pkg/appender/ingest.go | 20 +- .../v3io/v3io-tsdb/pkg/appender/store.go | 17 +- .../v3io/v3io-tsdb/pkg/chunkenc/vartype.go | 49 +- .../v3io/v3io-tsdb/pkg/chunkenc/xor.go | 13 +- .../v3io/v3io-tsdb/pkg/config/config.go | 33 +- .../v3io-tsdb/pkg/formatter/formatters.go | 4 +- .../v3io/v3io-tsdb/pkg/formatter/type.go | 10 +- .../v3io/v3io-tsdb/pkg/partmgr/partmgr.go | 120 +- .../v3io-tsdb/pkg/pquerier/chunkIterator.go | 5 +- .../pkg/pquerier/chunkIterator_test.go | 19 +- .../v3io/v3io-tsdb/pkg/pquerier/collector.go | 13 +- .../v3io/v3io-tsdb/pkg/pquerier/frames.go | 99 +- .../client_aggregates_integration_test.go | 29 +- ...oss_series_aggregation_integration_test.go | 27 +- .../dataframe_query_integration_test.go | 573 ++++++++- .../downsample_query_integration_test.go | 4 +- .../integration_test_basic_test.go | 18 + .../query_sql_integration_test.go | 6 +- .../raw_query_integration_test.go | 40 +- .../server_aggregates_integration_test.go | 15 +- .../windowed_aggregation_integration_test.go | 15 +- .../v3io/v3io-tsdb/pkg/pquerier/querier.go | 2 +- .../v3io/v3io-tsdb/pkg/pquerier/select.go | 19 +- .../pkg/pquerier/selectQueryContext_test.go | 2 +- .../v3io/v3io-tsdb/pkg/pquerier/sql_parser.go | 5 +- .../v3io/v3io-tsdb/pkg/querier/series.go | 10 +- .../v3io/v3io-tsdb/pkg/querier/seriesset.go | 4 +- .../pkg/tsdb/delete_integration_test.go | 1141 ----------------- .../v3io/v3io-tsdb/pkg/tsdb/schema/schema.go | 16 +- .../v3io-tsdb/pkg/tsdb/tsdbtest/config.go | 2 + .../v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go | 114 +- .../v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go | 571 +-------- .../pkg/tsdb/v3iotsdb_integration_test.go | 365 +++++- .../v3io/v3io-tsdb/pkg/tsdbctl/add.go | 12 +- .../v3io/v3io-tsdb/pkg/tsdbctl/check.go | 59 +- .../v3io/v3io-tsdb/pkg/tsdbctl/delete.go | 40 +- .../v3io/v3io-tsdb/pkg/tsdbctl/query.go | 7 +- .../v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl.go | 13 +- .../v3io-tsdb/pkg/tsdbctl/tsdbctl_test.go | 14 +- .../v3io/v3io-tsdb/pkg/utils/asynciter.go | 13 +- .../v3io/v3io-tsdb/pkg/utils/container.go | 19 +- .../v3io/v3io-tsdb/pkg/utils/labels.go | 22 +- .../v3io/v3io-tsdb/pkg/utils/misc.go | 23 +- .../v3io/v3io-tsdb/pkg/utils/timeutils.go | 4 +- .../v3io/v3io-tsdb/pkg/utils/validators.go | 8 +- .../BenchmarkIngestWithNuclio_test.go | 2 +- .../test/benchmark/BenchmarkIngest_test.go | 14 +- .../vendor/github.com/v3io/frames/.gitignore | 150 +++ .../vendor/github.com/v3io/frames/Jenkinsfile | 12 +- .../vendor/github.com/v3io/frames/Makefile | 17 +- .../vendor/github.com/v3io/frames/README.md | 548 +++++--- .../vendor/github.com/v3io/frames/builder.go | 9 + .../vendor/github.com/v3io/frames/config.go | 56 +- .../vendor/github.com/v3io/frames/frame.go | 4 + .../github.com/v3io/frames/frames.proto | 14 +- .../vendor/github.com/v3io/frames/go.mod | 16 +- 
.../vendor/github.com/v3io/frames/go.sum | 25 +- .../github.com/v3io/frames/pb/frames.pb.go | 339 ++--- .../github.com/v3io/frames/pb/methods.go | 21 - .../vendor/github.com/v3io/frames/types.go | 2 + .../v3io/v3io-go/pkg/dataplane/container.go | 5 +- .../v3io/v3io-go/pkg/dataplane/context.go | 5 - .../v3io-go/pkg/dataplane/http/container.go | 4 +- .../v3io-go/pkg/dataplane/http/context.go | 313 +---- .../v3io/v3io-go/pkg/dataplane/types.go | 27 +- .../v3io/v3io-tsdb/vendor/modules.txt | 6 +- 150 files changed, 5294 insertions(+), 6087 deletions(-) delete mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go create mode 100644 functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/.gitignore delete mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go create mode 100644 functions/query/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/.gitignore diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile index a0599447..dda6c5ec 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Jenkinsfile @@ -93,6 +93,7 @@ def build_nuclio(V3IO_TSDB_VERSION, internal_status="stable") { GO111MODULE=on go mod vendor rm -rf .git vendor/github.com/nuclio vendor/github.com/${git_project_upstream_user}/frames/vendor/golang.org/x/net vendor/golang.org/x/net """ + sh("chown 1000:1000 ./ -R") } } }, @@ -113,6 +114,7 @@ def build_nuclio(V3IO_TSDB_VERSION, internal_status="stable") { GO111MODULE=on go mod vendor rm -rf .git vendor/github.com/nuclio vendor/github.com/${git_project_upstream_user}/frames/vendor/golang.org/x/net vendor/golang.org/x/net """ + sh("chown 1000:1000 ./ -R") } } } @@ -130,15 +132,16 @@ def build_nuclio(V3IO_TSDB_VERSION, internal_status="stable") { git add functions/ingest/vendor/github.com functions/query/vendor/github.com; """ try { - sh("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") } catch (err) { echo "Can not commit" + echo err } try { if ( "${internal_status}" == "unstable" ) { - sh("git push origin development") + common.shellc("git push origin development") } else { - sh("git push origin master") + common.shellc("git push origin master") } } catch (err) { echo "Can not push code" @@ -153,7 +156,7 @@ def build_nuclio(V3IO_TSDB_VERSION, internal_status="stable") { } } -def build_prometheus(V3IO_TSDB_VERSION, internal_status="stable") { +def build_prometheus(V3IO_TSDB_VERSION, FRAMES_VERSION, internal_status="stable") { withCredentials([ usernamePassword(credentialsId: git_deploy_user, passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME'), string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') @@ -181,9 +184,11 @@ def build_prometheus(V3IO_TSDB_VERSION, internal_status="stable") { dir("${BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") { if("${git_project_user}" != "${git_project_upstream_user}") { sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/frames=github.com/${git_project_user}/frames@${FRAMES_VERSION}") sh("GO111MODULE=on go get") } else { sh("GO111MODULE=on go mod edit -replace 
github.com/${git_project_upstream_user}/v3io-tsdb=github.com/${git_project_upstream_user}/v3io-tsdb@${V3IO_TSDB_VERSION}") + sh("GO111MODULE=on go mod edit -replace github.com/${git_project_upstream_user}/frames=github.com/${git_project_upstream_user}/frames@${FRAMES_VERSION}") } sh("GO111MODULE=on go mod vendor") sh("chown 1000:1000 ./ -R") @@ -199,18 +204,19 @@ def build_prometheus(V3IO_TSDB_VERSION, internal_status="stable") { git config --global user.name '${GIT_USERNAME}' git remote rm origin git remote add origin https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/${git_project_user}/${git_project}.git - git add go.mod go.sum vendor/modules.txt; + git add go.mod go.sum vendor/modules.txt vendor; """ try { - sh("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") } catch (err) { echo "Can not commit" + echo err } try { if ( "${internal_status}" == "unstable" ) { - sh("git push origin development") + common.shellc("git push origin development") } else { - sh("git push origin master") + common.shellc("git push origin master") } } catch (err) { echo "Can not push code" @@ -274,15 +280,16 @@ def build_frames(V3IO_TSDB_VERSION, internal_status="stable") { git add go.mod go.sum vendor/modules.txt; """ try { - sh("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") + common.shellc("git commit -m 'Updated TSDB to ${V3IO_TSDB_VERSION}'") } catch (err) { echo "Can not commit" + echo err } try { if ( "${internal_status}" == "unstable" ) { - sh("git push origin development") + common.shellc("git push origin development") } else { - sh("git push origin master") + common.shellc("git push origin master") } } catch (err) { echo "Can not push code" @@ -297,8 +304,94 @@ def build_frames(V3IO_TSDB_VERSION, internal_status="stable") { } } +def wait_for_release(V3IO_TSDB_VERSION, tasks_list) { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + if (V3IO_TSDB_VERSION != "unstable") { + stage('waiting for prereleases moved to releases') { + container('jnlp') { + i = 0 + def success_count = 0 + + while (true) { + def done_count = 0 + + echo "attempt #${i}" + tasks_list.each { project, status -> + if (status == null) { + def RELEASE_SUCCESS = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/releases/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"prerelease\"]' | if grep -iq false; then echo 'release'; else echo 'prerelease'; fi", + returnStdout: true + ).trim() + + echo "${project} is ${RELEASE_SUCCESS}" + if (RELEASE_SUCCESS != null && RELEASE_SUCCESS == 'release') { + tasks_list.putAt(project, true) + done_count++ + success_count++ + } else { + def TAG_SHA = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/git/refs/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"object\"][\"sha\"]'", + returnStdout: true + ).trim() + + if (TAG_SHA != null) { + def COMMIT_STATUS = sh( + script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/commits/${TAG_SHA}/statuses | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[0][\"state\"]' | if 
grep -iq error; then echo 'error'; else echo 'ok'; fi", + returnStdout: true + ).trim() + if (COMMIT_STATUS != null && COMMIT_STATUS == 'error') { + tasks_list.putAt(project, false) + done_count++ + } + } + } + } else { + done_count++ + } + } + if (success_count >= tasks_list.size()) { + echo "all releases have been successfully completed" + break + } + + if (done_count >= tasks_list.size() || i++ > attempts) { + def failed = [] + def notcompleted = [] + def error_string = '' + tasks_list.each { project, status -> + if (status == null) { + notcompleted += project + } else if (status == false) { + failed += project + } + } + if (failed.size()) { + error_string += failed.join(',') + ' have been failed :_(. ' + } + if (notcompleted.size()) { + error_string += notcompleted.join(',') + ' have been not completed :(. ' + } + error(error_string) + break + } + + sleep(60) + } + } + } + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger tsdb-nuclio and prometheus") + } + } + } +} + podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang") { def MAIN_TAG_VERSION + def FRAMES_NEXT_VERSION def next_versions = ['prometheus':null, 'tsdb-nuclio':null] pipelinex = library(identifier: 'pipelinex@refs', retriever: modernSCM( @@ -386,6 +479,7 @@ podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang") CURRENT_VERSION = github.get_short_tag_version("frames", git_project_user, GIT_TOKEN) echo "$CURRENT_VERSION" NEXT_VERSION = "${CURRENT_VERSION}-${MAIN_TAG_VERSION}" + FRAMES_NEXT_VERSION = NEXT_VERSION next_versions.putAt("frames", NEXT_VERSION) } } @@ -411,65 +505,6 @@ podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang") } } } - }, - 'prometheus': { - podTemplate(label: "v3io-tsdb-prometheus-${label}", inheritFrom: "jnlp-docker-golang") { - node("v3io-tsdb-prometheus-${label}") { - withCredentials([ - string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') - ]) { - def TAG_VERSION - def NEXT_VERSION - - if (MAIN_TAG_VERSION != "unstable") { - stage('get current version') { - container('jnlp') { - sh """ - cd ${BUILD_FOLDER} - git clone https://${GIT_TOKEN}@github.com/${git_project_user}/prometheus.git src/github.com/prometheus/prometheus - """ - - TAG_VERSION = sh( - script: "cat ${BUILD_FOLDER}/src/github.com/prometheus/prometheus/VERSION", - returnStdout: true - ).trim() - } - } - - if (TAG_VERSION) { - stage('get previous release version') { - container('jnlp') { - CURRENT_VERSION = github.get_current_tag_version("prometheus", git_project_user, GIT_TOKEN) - echo "$CURRENT_VERSION" - version_list=CURRENT_VERSION.split('-') - NEXT_VERSION = "v${TAG_VERSION}-${version_list[1]}-${MAIN_TAG_VERSION}" - echo "$NEXT_VERSION" - next_versions.putAt('prometheus', NEXT_VERSION) - } - } - - build_prometheus(MAIN_TAG_VERSION, "unstable") - build_prometheus(MAIN_TAG_VERSION) - - stage('create prometheus prerelease') { - container('jnlp') { - echo "Triggered prometheus development will be builded with last tsdb stable version" - github.delete_release("prometheus", git_project_user, "unstable", GIT_TOKEN) - github.create_prerelease("prometheus", git_project_user, "unstable", GIT_TOKEN, "development") - - echo "Trigger prometheus ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" - github.create_prerelease("prometheus", git_project_user, NEXT_VERSION, GIT_TOKEN) - } - } - } - } else { - stage('info') { - echo("Unstable tsdb doesn't trigger prometheus") - } - } - } - } - } } ) } @@ -477,89 +512,76 @@ podTemplate(label: 
"${git_project}-${label}", inheritFrom: "jnlp-docker-golang") } node("${git_project}-${label}") { - withCredentials([ - string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') - ]) { - if (MAIN_TAG_VERSION != "unstable") { - stage('waiting for prereleases moved to releases') { - container('jnlp') { - i = 0 - def tasks_list = ['prometheus': null, 'tsdb-nuclio': null] - def success_count = 0 - - while (true) { - def done_count = 0 - - echo "attempt #${i}" - tasks_list.each { project, status -> - if (status == null) { - def RELEASE_SUCCESS = sh( - script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/releases/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"prerelease\"]' | if grep -iq false; then echo 'release'; else echo 'prerelease'; fi", - returnStdout: true - ).trim() + wait_for_release(MAIN_TAG_VERSION, ['tsdb-nuclio': null]) + } - echo "${project} is ${RELEASE_SUCCESS}" - if (RELEASE_SUCCESS != null && RELEASE_SUCCESS == 'release') { - tasks_list.putAt(project, true) - done_count++ - success_count++ - } else { - def TAG_SHA = sh( - script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/git/refs/tags/${next_versions[project]} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[\"object\"][\"sha\"]'", - returnStdout: true - ).trim() - - if (TAG_SHA != null) { - def COMMIT_STATUS = sh( - script: "curl --silent -H \"Content-Type: application/json\" -H \"Authorization: token ${GIT_TOKEN}\" -X GET https://api.github.com/repos/${git_project_user}/${project}/commits/${TAG_SHA}/statuses | python -c 'import json,sys;obj=json.load(sys.stdin);print obj[0][\"state\"]' | if grep -iq error; then echo 'error'; else echo 'ok'; fi", - returnStdout: true - ).trim() - if (COMMIT_STATUS != null && COMMIT_STATUS == 'error') { - tasks_list.putAt(project, false) - done_count++ - } - } - } - } else { - done_count++ - } - } - if (success_count >= tasks_list.size()) { - echo "all releases have been successfully completed" - break - } + // prometheus moved last cos need frames version to build + podTemplate(label: "v3io-tsdb-prometheus-${label}", inheritFrom: "jnlp-docker-golang") { + node("v3io-tsdb-prometheus-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { + def TAG_VERSION + def NEXT_VERSION - if (done_count >= tasks_list.size() || i++ > attempts) { - def failed = [] - def notcompleted = [] - def error_string = '' - tasks_list.each { project, status -> - if (status == null) { - notcompleted += project - } else if (status == false) { - failed += project - } - } - if (failed.size()) { - error_string += failed.join(',') + ' have been failed :_(. ' - } - if (notcompleted.size()) { - error_string += notcompleted.join(',') + ' have been not completed :(. 
' - } - error(error_string) - break + if (MAIN_TAG_VERSION != "unstable") { + stage('get current version') { + container('jnlp') { + sh """ + cd ${BUILD_FOLDER} + git clone https://${GIT_TOKEN}@github.com/${git_project_user}/prometheus.git src/github.com/prometheus/prometheus + """ + + TAG_VERSION = sh( + script: "cat ${BUILD_FOLDER}/src/github.com/prometheus/prometheus/VERSION", + returnStdout: true + ).trim() + } + } + + if (TAG_VERSION) { + stage('get previous release version') { + container('jnlp') { + CURRENT_VERSION = github.get_current_tag_version("prometheus", git_project_user, GIT_TOKEN) + echo "$CURRENT_VERSION" + version_list=CURRENT_VERSION.split('-') + NEXT_VERSION = "v${TAG_VERSION}-${version_list[1]}-${MAIN_TAG_VERSION}" + echo "$NEXT_VERSION" + next_versions.putAt('prometheus', NEXT_VERSION) } + } + + build_prometheus(MAIN_TAG_VERSION, FRAMES_NEXT_VERSION, "unstable") + build_prometheus(MAIN_TAG_VERSION, FRAMES_NEXT_VERSION) - sleep(60) + stage('create prometheus prerelease') { + container('jnlp') { + echo "Triggered prometheus development will be built with the latest stable tsdb version" + github.delete_release("prometheus", git_project_user, "unstable", GIT_TOKEN) + github.create_prerelease("prometheus", git_project_user, "unstable", GIT_TOKEN, "development") + + echo "Trigger prometheus ${NEXT_VERSION} with tsdb ${MAIN_TAG_VERSION}" + github.create_prerelease("prometheus", git_project_user, NEXT_VERSION, GIT_TOKEN) + } } } - } - } else { - stage('info') { - echo("Unstable tsdb doesn't trigger tsdb-nuclio and prometheus") + } else { + stage('info') { + echo("Unstable tsdb doesn't trigger prometheus") + } } } + } + } + + node("${git_project}-${label}") { + wait_for_release(MAIN_TAG_VERSION, ['prometheus': null]) + } + node("${git_project}-${label}") { + withCredentials([ + string(credentialsId: git_deploy_user_token, variable: 'GIT_TOKEN') + ]) { stage('update release status') { container('jnlp') { github.update_release_status(git_project, git_project_user, "${MAIN_TAG_VERSION}", GIT_TOKEN) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile index 73465498..1c648a81 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/Makefile @@ -32,7 +32,15 @@ BUILD_OPTS := -ldflags " \ -X $(CONFIG_PKG).branch=$(GIT_BRANCH)" \ -v -o "$(GOPATH)/bin/$(TSDBCTL_BIN_NAME)" -TSDB_BUILD_COMMAND ?= CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl +TSDB_BUILD_COMMAND ?= GO111MODULE="on" CGO_ENABLED=0 go build $(BUILD_OPTS) ./cmd/tsdbctl + +.PHONY: fmt +fmt: + gofmt -l -s -w . + +.PHONY: get +get: + GO111MODULE="on" go mod tidy .PHONY: test test: @@ -70,6 +78,8 @@ else endif @echo Installing linters... go get -u github.com/pavius/impi/cmd/impi + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.10.2 + cp ./bin/golangci-lint $(GOPATH)/bin/ @echo Verifying imports... $(GOPATH)/bin/impi \ @@ -78,4 +88,12 @@ endif --skip pkg/controller/client \ --scheme stdLocalThirdParty \ ./... - # Imports OK + + @echo Linting... + @$(GOPATH)/bin/golangci-lint run \ + --disable-all --enable=deadcode --enable=goconst --enable=golint --enable=ineffassign \ + --enable=interfacer --enable=unconvert --enable=varcheck --enable=errcheck --enable=gofmt --enable=misspell \ + --enable=staticcheck --enable=gosimple --enable=govet --enable=goconst \ + cmd/... pkg/... internal/... 
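[Editor's aside] The lint target above drives many of the mechanical hunks in this patch: errcheck in particular forces previously ignored return values to be handled (see the logWriter.Write fix in internal/pkg/performance/metrics.go further down). A minimal sketch of that pattern, with hypothetical names:

package lintexample

import "io"

// writeThenFlush only runs the follow-up work if the write succeeded,
// mirroring the errcheck-driven fix applied to registerShutdownHook below.
// The function and parameter names here are illustrative, not part of the patch.
func writeThenFlush(w io.Writer, payload []byte, flush func()) {
	if _, err := w.Write(payload); err == nil {
		flush() // dependent work is skipped when the write fails
	}
}
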
+ @echo done linting + diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go index fd9ce089..2b62ab35 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/cmd/tsdbctl/tsdbctl.go @@ -21,6 +21,7 @@ func Run() error { func tearDown(cmd *tsdbctl.RootCommandeer) { if cmd.Reporter != nil { // could be nil if has failed on initialisation + // nolint: errcheck cmd.Reporter.Stop() } } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod index 0f30f3fd..7a8322dc 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.mod @@ -4,29 +4,25 @@ go 1.12 require ( github.com/cespare/xxhash v1.1.0 - github.com/cpuguy83/go-md2man v1.0.10 // indirect github.com/ghodss/yaml v1.0.0 github.com/imdario/mergo v0.3.7 - github.com/kr/pretty v0.1.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/nuclio/logger v0.0.1 github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324 github.com/nuclio/nuclio-test-go v0.0.0-20180704132150-0ce6587f8e37 github.com/nuclio/zap v0.0.2 + github.com/pavius/impi v0.0.0-20200212064320-5db7efa5f87b // indirect github.com/pkg/errors v0.8.1 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a github.com/spf13/cobra v0.0.3 - github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.4.0 - github.com/tinylib/msgp v1.1.1 // indirect github.com/v3io/frames v0.6.8-v0.9.11 - github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d - github.com/v3io/v3io-go-http v0.0.0-20190415143924-cc2fbcde6663 // indirect + github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 - google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect ) replace ( + github.com/v3io/frames => github.com/v3io/frames v0.6.9-v0.9.12.0.20200219120609-981ffb872c73 github.com/xwb1989/sqlparser => github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 labix.org/v2/mgo => github.com/go-mgo/mgo v0.0.0-20180705113738-7446a0344b7872c067b3d6e1b7642571eafbae17 launchpad.net/gocheck => github.com/go-check/check v0.0.0-20180628173108-788fd78401277ebd861206a03c884797c6ec5541 diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum index 75aa4c29..3bfb9eb0 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/go.sum @@ -4,7 +4,6 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M= github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= @@ -17,6 +16,7 @@ 
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-00000000000000-611e8accdfc92c4187d399e95ce826046d4c8d73 h1:HXrYLMPwOwRGzTXb7QIRTWJgnMrbR9FNt7pLRsvk73Q= github.com/golang/groupcache v0.0.0-00000000000000-611e8accdfc92c4187d399e95ce826046d4c8d73/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= @@ -32,17 +32,16 @@ github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -61,7 +60,9 @@ github.com/nuclio/nuclio-test-go v0.0.0-20180704132150-0ce6587f8e37/go.mod h1:aO github.com/nuclio/zap v0.0.0-20180228181516-4a2bd2f9ef28/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= github.com/nuclio/zap v0.0.2 h1:rY5PkMOl8CTkqRqIPuxziBiKK6Mq/8oEurfgRnNtqf0= github.com/nuclio/zap v0.0.2/go.mod h1:SUxPsgePvlyjx6c5MtGdB50pf0IQThtlyLwISLboeuc= -github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8 h1:1N/m7VjDY1Pd30Uwv6bLttZVFQm3n8RUK9Ylf2J+4a4= +github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pavius/impi v0.0.0-20200212064320-5db7efa5f87b h1:Te2cVBQZncOwLG4qR4Jex2EFd6i8lSqC9GzUFNBwS/0= +github.com/pavius/impi v0.0.0-20200212064320-5db7efa5f87b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8/go.mod 
h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8 h1:WqLgmr/wj9TO5Sc6oYPQRAJBxuHE0NTeuVeFnT+FZVo= github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8/go.mod h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= @@ -72,19 +73,17 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2+incompatible h1:/YIL6L1Deczl4O/cQ7ZVdrdKwuB6y7EWpw9LkD8xofE= github.com/russross/blackfriday v1.5.2+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -94,28 +93,30 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y= github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/v3io/frames v0.0.0-20190328123118-1dad1ff610509e7b087d9cd390ed1b452caecf15/go.mod h1:6aKW4Wl4A+gQhXH0JRCVOLgwvcrLyk+fqEpemuie094= github.com/v3io/frames v0.6.8-v0.9.11 h1:oHcw32SbgW/pva6xdGwLXlTZJ+hS8i8AVPeGydhqbuc= github.com/v3io/frames v0.6.8-v0.9.11/go.mod h1:V3j8yjzhNNGXjosCBn7Qf8C8jo25Y+7Ge/SkTK9ya9Q= +github.com/v3io/frames v0.6.9-v0.9.12.0.20200219094046-a5f66ccd64d2 h1:6q7OhKak5k+MuikdLOblDwB0DF+A9+CTw5pTIBUBY1Q= +github.com/v3io/frames v0.6.9-v0.9.12.0.20200219094046-a5f66ccd64d2/go.mod h1:JHybUVwkVwmNNjKq04nualQTtTYZL4aAyOdh/aa5eLU= +github.com/v3io/frames 
v0.6.9-v0.9.12.0.20200219120609-981ffb872c73 h1:pV7ZuJuPQFdROJAJy+0OBYephwyB14FRFWmBffy53dE= +github.com/v3io/frames v0.6.9-v0.9.12.0.20200219120609-981ffb872c73/go.mod h1:gO8xPhCF8O7Ya2lGrvnDfG/Z7BtY6pzhW3Fd+Mgk7Gs= github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871 h1:myF4tU/HdFWU1UzMdf16cHRbownzsyvL7VKIHqkrSvo= github.com/v3io/sqlparser v0.0.0-20190306105200-4d7273501871/go.mod h1:QD2Bo64oyTWzeV8RFehXS0hZEDFgOK99/h2a6ErRu6E= github.com/v3io/v3io-go v0.0.0-20191024084247-042df6b5ee40eb60996ab7f4e74ec9aa07d996c4/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= github.com/v3io/v3io-go v0.0.0-20191120130819-9003ae83f0b673afb88b862d8f46dcc818684450/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= -github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d h1:OotbIx7+QYju2DlAAVxWz0QFzBicHLc47u9DJGpVUL4= -github.com/v3io/v3io-go v0.0.7-0.20200216132233-3b52a325296d/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= -github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2 h1:NJc63wM25iS+ci5z7LVwjWD4QM0QpTQw/fovKzatss0= +github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6 h1:+52DyMCjcWg6uXAlTe0KgbOsiQqUKrtL9tBPSERhyFg= +github.com/v3io/v3io-go v0.0.5-0.20191205125653-9003ae83f0b6/go.mod h1:IFb6dJiyvJnOjXUoCoPJ5UViaYjgVYmqJb4fD1qDeLk= github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2/go.mod h1:GXYcR9MxgfbE3BJdkXki5EclvtS8Nxu2RQNLA8hMMog= github.com/v3io/v3io-go-http v0.0.0-20190415143924-cc2fbcde6663 h1:WZcM/GRBAastacksmv5pODbtr8fJ/0/9EsPDpPfXkRk= github.com/v3io/v3io-go-http v0.0.0-20190415143924-cc2fbcde6663/go.mod h1:GXYcR9MxgfbE3BJdkXki5EclvtS8Nxu2RQNLA8hMMog= github.com/v3io/v3io-tsdb v0.0.0-20190328071546-4e85f3df2d205fc7368d54184bb2ceff949ab4bd/go.mod h1:A+5yKC16QxLf+Fy5v7VvIxSw+jwsKHLhUS7dCYFDLAA= github.com/v3io/v3io-tsdb v0.9.11/go.mod h1:K5jxlyxnb/HGyFlPrJQtIbMWQBY6USFBsHYI/vzMKVo= +github.com/v3io/v3io-tsdb v0.9.14/go.mod h1:Oqvn2Y3EDvz4eX5LW+YfHhv4ho30E2pg+4vebjqEJYE= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.0.0 h1:BwIoZQbBsTo3v2F5lz5Oy3TlTq4wLKTLV260EVTEWco= github.com/valyala/fasthttp v1.0.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/fasthttp v1.2.0 h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= @@ -128,16 +129,15 @@ golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= 
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -148,7 +148,6 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9 h1:26lptpu+T60F849wXfTQMz9ecFf6nTQM0J1JjLSga5U= google.golang.org/genproto v0.0.0-20181026194446-8b5d7a19e2d9/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.16.0 h1:dz5IJGuC2BB7qXR5AyHNwAUBhZscK2xVez7mznh72sY= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go index fc190eb2..8f2c5f37 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/internal/pkg/performance/metrics.go @@ -148,8 +148,10 @@ func (mr *MetricReporter) registerShutdownHook() { go func() { sig := <-gracefulStop - mr.logWriter.Write([]byte(fmt.Sprintf("\n**************************\ncaught sig: %+v\n**************************\n", sig))) - metrics.WriteOnce(mr.registry, mr.logWriter) + _, err := mr.logWriter.Write([]byte(fmt.Sprintf("\n**************************\ncaught sig: %+v\n**************************\n", sig))) + if err == nil { + metrics.WriteOnce(mr.registry, mr.logWriter) + } }() } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go index 122adfa7..64d85d16 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go @@ -92,7 +92,6 @@ func SchemaFieldFromString(aggregates []string, col string) ([]config.SchemaFiel for _, val := range aggrToSchemaField { fieldList = append(fieldList, getAggrFullName(val, col)) } - return fieldList, nil } else { field, ok := aggrToSchemaField[trimmed] if !ok { @@ -155,7 +154,7 @@ func AggregatesFromStringListWithCount(split []string) (AggrType, []AggrType, er var hasAggregates bool for _, s := range split { - aggr, err := AggregateFromString(s) + aggr, err := FromString(s) if err != nil { return 0, nil, err } @@ -173,7 +172,7 @@ func AggregatesFromStringListWithCount(split []string) (AggrType, []AggrType, er return aggrMask, aggrList, nil } -func 
AggregateFromString(aggrString string) (AggrType, error) { +func FromString(aggrString string) (AggrType, error) { trimmed := strings.TrimSpace(aggrString) if trimmed == "" { return 0, nil @@ -295,7 +294,7 @@ func IsCountAggregate(aggr AggrType) bool { return aggr == aggrTypeCount } func HasAggregates(mask AggrType) bool { return mask != aggrTypeNone } -func AggregateMaskToString(mask AggrType) string { +func MaskToString(mask AggrType) string { var output strings.Builder aggCount := 0 for _, raw := range rawAggregates { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go index e7dd3dcc..f8699148 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/iterator.go @@ -31,7 +31,7 @@ import ( // Local cache of init arrays per aggregate type. Used to mimic memcopy and initialize data arrays with specific values var initDataArrayCache = map[AggrType][]float64{} -type AggregateSeries struct { +type Series struct { colName string // column name ("v" in timeseries) functions []AggrType // list of aggregation functions to return (count, avg, sum, ..) aggrMask AggrType // the sum of aggregates (or between all aggregates) @@ -41,7 +41,7 @@ type AggregateSeries struct { overlapWindows []int // a list of overlapping windows (* interval), e.g. last 1hr, 6hr, 12hr, 24hr } -func NewAggregateSeries(functions, col string, buckets int, interval, rollupTime int64, windows []int) (*AggregateSeries, error) { +func NewAggregateSeries(functions, col string, buckets int, interval, rollupTime int64, windows []int) (*Series, error) { split := strings.Split(functions, ",") var aggrMask AggrType @@ -61,7 +61,7 @@ func NewAggregateSeries(functions, col string, buckets int, interval, rollupTime aggrMask |= aggrTypeCount } - newAggregateSeries := AggregateSeries{ + newAggregateSeries := Series{ aggrMask: aggrMask, functions: aggrList, colName: col, @@ -74,7 +74,7 @@ func NewAggregateSeries(functions, col string, buckets int, interval, rollupTime return &newAggregateSeries, nil } -func (as *AggregateSeries) CanAggregate(partitionAggr AggrType) bool { +func (as *Series) CanAggregate(partitionAggr AggrType) bool { // keep only real aggregates aggrMask := 0x7f & as.aggrMask // make sure the DB has all the aggregates we need (on bits in the mask) @@ -85,23 +85,23 @@ func (as *AggregateSeries) CanAggregate(partitionAggr AggrType) bool { as.interval >= as.rollupTime && (as.interval%as.rollupTime == 0 || as.interval/as.rollupTime > 3) } -func (as *AggregateSeries) GetAggrMask() AggrType { +func (as *Series) GetAggrMask() AggrType { return as.aggrMask } -func (as *AggregateSeries) GetFunctions() []AggrType { +func (as *Series) GetFunctions() []AggrType { return as.functions } -func (as *AggregateSeries) NumFunctions() int { +func (as *Series) NumFunctions() int { return len(as.functions) } -func (as *AggregateSeries) toAttrName(aggr AggrType) string { +func (as *Series) toAttrName(aggr AggrType) string { return "_" + as.colName + "_" + aggr.String() } -func (as *AggregateSeries) GetAttrNames() []string { +func (as *Series) GetAttrNames() []string { var names []string for _, aggr := range rawAggregates { @@ -114,8 +114,8 @@ func (as *AggregateSeries) GetAttrNames() []string { } // create new aggregation set from v3io aggregation array attributes -func (as *AggregateSeries) NewSetFromAttrs( - length, start, end int, 
mint, maxt int64, attrs *map[string]interface{}) (*AggregateSet, error) { +func (as *Series) NewSetFromAttrs( + length, start, end int, mint, maxt int64, attrs *map[string]interface{}) (*Set, error) { aggrArrays := map[AggrType][]uint64{} dataArrays := map[AggrType][]float64{} @@ -139,7 +139,7 @@ func (as *AggregateSeries) NewSetFromAttrs( } } - aggrSet := AggregateSet{length: length, interval: as.interval, overlapWin: as.overlapWindows} + aggrSet := Set{length: length, interval: as.interval, overlapWin: as.overlapWindows} aggrSet.dataArrays = dataArrays arrayIndex := start @@ -178,13 +178,13 @@ func (as *AggregateSeries) NewSetFromAttrs( } // prepare new aggregation set from v3io raw chunk attributes (in case there are no aggregation arrays) -func (as *AggregateSeries) NewSetFromChunks(length int) *AggregateSet { +func (as *Series) NewSetFromChunks(length int) *Set { if as.overlapWindows != nil { length = len(as.overlapWindows) } - newAggregateSet := AggregateSet{length: length, interval: as.interval, overlapWin: as.overlapWindows} + newAggregateSet := Set{length: length, interval: as.interval, overlapWin: as.overlapWindows} dataArrays := map[AggrType][]float64{} for _, aggr := range rawAggregates { @@ -199,7 +199,7 @@ func (as *AggregateSeries) NewSetFromChunks(length int) *AggregateSet { return &newAggregateSet } -type AggregateSet struct { +type Set struct { dataArrays map[AggrType][]float64 length int maxCell int @@ -208,12 +208,12 @@ type AggregateSet struct { overlapWin []int } -func (as *AggregateSet) GetMaxCell() int { +func (as *Set) GetMaxCell() int { return as.maxCell } // append the value to a cell in all relevant aggregation arrays -func (as *AggregateSet) AppendAllCells(cell int, val float64) { +func (as *Set) AppendAllCells(cell int, val float64) { if !isValidCell(cell, as) { return @@ -230,7 +230,7 @@ func (as *AggregateSet) AppendAllCells(cell int, val float64) { // append/merge server aggregation values into aggregation per requested interval/step // if the requested step interval is higher than stored interval we need to collapse multiple cells to one -func (as *AggregateSet) mergeArrayCell(aggr AggrType, cell int, val uint64) { +func (as *Set) mergeArrayCell(aggr AggrType, cell int, val uint64) { if cell >= as.length { return @@ -253,13 +253,13 @@ func (as *AggregateSet) mergeArrayCell(aggr AggrType, cell int, val uint64) { } } -func isValidCell(cellIndex int, aSet *AggregateSet) bool { +func isValidCell(cellIndex int, aSet *Set) bool { return cellIndex >= 0 && cellIndex < aSet.length } // function specific aggregation -func (as *AggregateSet) updateCell(aggr AggrType, cell int, val float64) { +func (as *Set) updateCell(aggr AggrType, cell int, val float64) { if !isValidCell(cell, as) { return @@ -268,7 +268,7 @@ func (as *AggregateSet) updateCell(aggr AggrType, cell int, val float64) { cellValue := as.dataArrays[aggr][cell] switch aggr { case aggrTypeCount: - as.dataArrays[aggr][cell] += 1 + as.dataArrays[aggr][cell]++ case aggrTypeSum: as.dataArrays[aggr][cell] += val case aggrTypeSqr: @@ -287,7 +287,7 @@ func (as *AggregateSet) updateCell(aggr AggrType, cell int, val float64) { } // return the value per aggregate or complex function -func (as *AggregateSet) GetCellValue(aggr AggrType, cell int) (float64, bool) { +func (as *Set) GetCellValue(aggr AggrType, cell int) (float64, bool) { if !isValidCell(cell, as) { return math.NaN(), false @@ -338,7 +338,7 @@ func (as *AggregateSet) GetCellValue(aggr AggrType, cell int) (float64, bool) { } // get the time per aggregate 
cell -func (as *AggregateSet) GetCellTime(base int64, index int) int64 { +func (as *Set) GetCellTime(base int64, index int) int64 { if as.overlapWin == nil { return base + int64(index)*as.interval } @@ -350,7 +350,7 @@ func (as *AggregateSet) GetCellTime(base int64, index int) int64 { return base - int64(as.overlapWin[index])*as.interval } -func (as *AggregateSet) Clear() { +func (as *Set) Clear() { as.maxCell = 0 for aggr := range as.dataArrays { initArray := getOrCreateInitDataArray(aggr, len(as.dataArrays[0])) @@ -359,7 +359,7 @@ func (as *AggregateSet) Clear() { } // Check if cell has data. Assumes that count is always present -func (as *AggregateSet) HasData(cell int) bool { +func (as *Set) HasData(cell int) bool { return as.dataArrays[aggrTypeCount][cell] > 0 } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go index 8e3fa0d3..19322404 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go @@ -50,7 +50,7 @@ type MetricState struct { key string name string hash uint64 - refId uint64 + refID uint64 aggrs []*MetricState @@ -122,7 +122,9 @@ type MetricsCache struct { updatesComplete chan int newUpdates chan int - lastMetric uint64 + lastMetric uint64 + + // TODO: consider switching to synch.Map (https://golang.org/pkg/sync/#Map) cacheMetricMap map[cacheKey]*MetricState // TODO: maybe use hash as key & combine w ref cacheRefMap map[uint64]*MetricState // TODO: maybe turn to list + free list, periodically delete old matrics @@ -187,7 +189,7 @@ func (mc *MetricsCache) addMetric(hash uint64, name string, metric *MetricState) defer mc.mtx.Unlock() mc.lastMetric++ - metric.refId = mc.lastMetric + metric.refID = mc.lastMetric mc.cacheRefMap[mc.lastMetric] = metric mc.cacheMetricMap[cacheKey{name, hash}] = metric if _, ok := mc.NameLabelMap[name]; !ok { @@ -218,6 +220,15 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 return 0, err } + var isValueVariantType bool + // If the value is not of Float type assume it's variant type. 
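[Editor's aside] The type switch this comment introduces (the hunk continues just below with the in-place version) decides the encoding once, up front, instead of probing only for float64. Pulled out as a standalone helper it would look roughly like this; an illustrative sketch, not part of the patch:

package appender

// isVariantValue reports whether a sample value needs the variant
// (string) encoding rather than the numeric XOR encoding; it mirrors
// the switch added to MetricsCache.Add.
func isVariantValue(v interface{}) bool {
	switch v.(type) {
	case int, int64, float64, float32:
		return false // numeric samples take the float64/XOR path
	default:
		return true // anything else (e.g. a string) is variant-encoded
	}
}
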
+ switch v.(type) { + case int, int64, float64, float32: + isValueVariantType = false + default: + isValueVariantType = true + } + name, key, hash := lset.GetKey() err = utils.IsValidMetricName(name) if err != nil { @@ -233,17 +244,15 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 aggrMetric, ok := mc.getMetric(name, hash) if !ok { aggrMetric = &MetricState{Lset: subLset, key: key, name: name, hash: hash} - aggrMetric.store = NewChunkStore(mc.logger, subLset.LabelNames(), true) + aggrMetric.store = newChunkStore(mc.logger, subLset.LabelNames(), true) mc.addMetric(hash, name, aggrMetric) aggrMetrics = append(aggrMetrics, aggrMetric) } } - metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, aggrs: aggrMetrics} - // if the (first) value is not float, use variant encoding, TODO: test w schema - if _, ok := v.(float64); !ok { - metric.isVariant = true - } - metric.store = NewChunkStore(mc.logger, lset.LabelNames(), false) + metric = &MetricState{Lset: lset, key: key, name: name, hash: hash, + aggrs: aggrMetrics, isVariant: isValueVariantType} + + metric.store = newChunkStore(mc.logger, lset.LabelNames(), false) mc.addMetric(hash, name, metric) } else { aggrMetrics = metric.aggrs @@ -252,15 +261,27 @@ func (mc *MetricsCache) Add(lset utils.LabelsIfc, t int64, v interface{}) (uint6 err = metric.error() metric.setError(nil) + if isValueVariantType != metric.isVariant { + newValueType := "numeric" + if isValueVariantType { + newValueType = "string" + } + existingValueType := "numeric" + if metric.isVariant { + existingValueType = "string" + } + return 0, errors.Errorf("Cannot append %v type metric to %v type metric.", newValueType, existingValueType) + } + mc.appendTV(metric, t, v) for _, aggrMetric := range aggrMetrics { mc.appendTV(aggrMetric, t, v) } - return metric.refId, err + return metric.refID, err } -// fast Add to metric (by refId) +// fast Add to metric (by refID) func (mc *MetricsCache) AddFast(ref uint64, t int64, v interface{}) error { err := verifyTimeValid(t) @@ -288,7 +309,7 @@ func (mc *MetricsCache) AddFast(ref uint64, t int64, v interface{}) error { func verifyTimeValid(t int64) error { if t > maxUnixTimeMs || t < minimalUnixTimeMs { - return fmt.Errorf("Time '%d' doesn't seem to be a valid Unix timesamp in milliseconds. The time must be in the years range 1970-2400.", t) + return fmt.Errorf("time '%d' doesn't seem to be a valid Unix timestamp in milliseconds. 
The time must be in the years range 1970-2400", t) } return nil } @@ -303,7 +324,7 @@ func (mc *MetricsCache) WaitForCompletion(timeout time.Duration) (int, error) { waitChan := make(chan int, 2) mc.asyncAppendChan <- &asyncAppend{metric: nil, t: 0, v: 0, resp: waitChan} - var maxWaitTime time.Duration = 0 + var maxWaitTime time.Duration if timeout == 0 { maxWaitTime = 24 * time.Hour // Almost-infinite time diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go index 11faf119..6a24b450 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go @@ -29,6 +29,7 @@ import ( "github.com/pkg/errors" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-go/pkg/errors" + "github.com/v3io/v3io-tsdb/pkg/utils" ) // Start event loops for handling metric updates (appends and Get/Update DB responses) @@ -223,7 +224,7 @@ func (mc *MetricsCache) postMetricUpdates(metric *MetricState) { metric.Lock() defer metric.Unlock() - sent := false + var sent bool var err error if metric.getState() == storeStatePreGet { @@ -270,7 +271,7 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, if resp.Error != nil && metric.getState() != storeStateGet { req := reqInput.(*v3io.UpdateItemInput) - mc.logger.ErrorWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, + mc.logger.WarnWith("I/O failure", "id", resp.ID, "err", resp.Error, "key", metric.key, "in-flight", mc.updatesInFlight, "mqueue", mc.metricQueue.Length(), "numsamples", metric.store.samplesQueueLength(), "path", req.Path, "update expression", req.Expression) } else { @@ -293,7 +294,7 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, } else { clear := func() { resp.Release() - metric.store = NewChunkStore(mc.logger, metric.Lset.LabelNames(), metric.store.isAggr()) + metric.store = newChunkStore(mc.logger, metric.Lset.LabelNames(), metric.store.isAggr()) metric.retryCount = 0 metric.setState(storeStateInit) } @@ -304,8 +305,17 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, // Metrics with too many update errors go into Error state metric.retryCount++ if e, hasStatusCode := resp.Error.(v3ioerrors.ErrorWithStatusCode); hasStatusCode && e.StatusCode() != http.StatusServiceUnavailable { - mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) - setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) + // If condition was evaluated as false log this and report this error upstream. + if utils.IsFalseConditionError(resp.Error) { + req := reqInput.(*v3io.UpdateItemInput) + // This might happen on attempt to add metric value of wrong type, i.e. float <-> string + errMsg := fmt.Sprintf("trying to ingest values of incompatible data type. 
Metric %q has not been updated.", req.Path) + mc.logger.ErrorWith(errMsg) + setError(mc, metric, errors.Wrap(resp.Error, errMsg)) + } else { + mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) + setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) + } clear() return false } else if metric.retryCount == maxRetriesOnWrite { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go index b9387adf..eadeedb4 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go @@ -41,7 +41,7 @@ import ( const maxLateArrivalInterval = 59 * 60 * 1000 // Max late arrival of 59min // Create a chunk store with two chunks (current, previous) -func NewChunkStore(logger logger.Logger, labelNames []string, aggrsOnly bool) *chunkStore { +func newChunkStore(logger logger.Logger, labelNames []string, aggrsOnly bool) *chunkStore { store := chunkStore{ logger: logger, lastTid: -1, @@ -404,7 +404,7 @@ func (cs *chunkStore) writeChunks(mc *MetricsCache, metric *MetricState) (hasPen if len(cs.pending) > 0 { mc.metricQueue.Push(metric) } - hasPendingUpdates, err = false, nil + hasPendingUpdates = false return } @@ -418,17 +418,20 @@ func (cs *chunkStore) writeChunks(mc *MetricsCache, metric *MetricState) (hasPen var encodingExpr string if !cs.isAggr() { - encodingExpr = fmt.Sprintf("%v='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) + encodingExpr = fmt.Sprintf("%s='%d'; ", config.EncodingAttrName, activeChunk.appender.Encoding()) } - lsetExpr := fmt.Sprintf("%v='%s'; ", config.LabelSetAttrName, metric.key) + lsetExpr := fmt.Sprintf("%s='%s'; ", config.LabelSetAttrName, metric.key) expr = lblexpr + encodingExpr + lsetExpr + expr } // Call the V3IO async UpdateItem method + conditionExpr := fmt.Sprintf("NOT exists(%s) OR (exists(%s) AND %s == '%d')", + config.EncodingAttrName, config.EncodingAttrName, + config.EncodingAttrName, activeChunk.appender.Encoding()) expr += fmt.Sprintf("%v=%d;", config.MaxTimeAttrName, cs.maxTime) // TODO: use max() expr path := partition.GetMetricPath(metric.name, metric.hash, cs.labelNames, cs.isAggr()) request, err := mc.container.UpdateItem( - &v3io.UpdateItemInput{Path: path, Expression: &expr}, metric, mc.responseChan) + &v3io.UpdateItemInput{Path: path, Expression: &expr, Condition: conditionExpr}, metric, mc.responseChan) if err != nil { mc.logger.ErrorWith("UpdateItem failed", "err", err) hasPendingUpdates = false @@ -438,7 +441,7 @@ func (cs *chunkStore) writeChunks(mc *MetricsCache, metric *MetricState) (hasPen // will add user data in request) mc.logger.DebugWith("Update-metric expression", "name", metric.name, "key", metric.key, "expr", expr, "reqid", request.ID) - hasPendingUpdates, err = true, nil + hasPendingUpdates = true cs.performanceReporter.UpdateHistogram("WriteChunksSizeHistogram", int64(pendingSamplesCount)) return }) @@ -467,7 +470,7 @@ func (cs *chunkStore) appendExpression(chunk *attrAppender) string { chunk.state |= chunkStateWriting expr := "" - idx, err := chunk.partition.TimeToChunkId(chunk.chunkMint) + idx, err := chunk.partition.TimeToChunkID(chunk.chunkMint) if err != nil { return "" } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go 
b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go index ae13c04f..a51d52da 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/vartype.go @@ -30,21 +30,30 @@ import ( ) const ( - varTypeNil byte = 0 - varTypeBlob byte = 1 - varTypeString byte = 2 - varTypeBool byte = 3 + varTypeNil byte = 0 + // nolint: deadcode,varcheck + varTypeBlob byte = 1 + varTypeString byte = 2 + // nolint: deadcode,varcheck + varTypeBool byte = 3 + // nolint: deadcode,varcheck varTypeFloat32 byte = 4 varTypeFloat64 byte = 5 - varTypeInt8 byte = 8 - varTypeInt16 byte = 9 - varTypeInt32 byte = 10 - varTypeInt64 byte = 11 + // nolint: deadcode,varcheck + varTypeInt8 byte = 8 + // nolint: deadcode,varcheck + varTypeInt16 byte = 9 + // nolint: deadcode,varcheck + varTypeInt32 byte = 10 + // nolint: deadcode,varcheck + varTypeInt64 byte = 11 ) const ( varValueNone byte = 0 + // nolint: deadcode,varcheck varValueZero byte = 1 + // nolint: deadcode,varcheck varValueOnes byte = 2 varValueAny byte = 3 ) @@ -108,36 +117,18 @@ func (a *varAppender) Chunk() Chunk { } func (a *varAppender) Append(t int64, v interface{}) { - if v == nil { a.appendNoValue(t, varTypeNil, varValueNone) return } - switch vType := v.(type) { - case float64: - val := v.(float64) - if val == 0 { - a.appendNoValue(t, varTypeFloat64, varValueZero) - return - - } - - if math.IsNaN(val) { - a.appendNoValue(t, varTypeFloat64, varValueNone) - return - } - - a.appendWithUint(t, varTypeFloat64, math.Float64bits(val)) - + switch val := v.(type) { case string: - val := []byte(v.(string)) - a.appendWithValue(t, varTypeString, val) + a.appendWithValue(t, varTypeString, []byte(val)) default: - a.logger.Error("unsupported type %v of value %v\n", vType, v) + a.logger.Error("unsupported type %T of value %v\n", v, v) } - } func (a *varAppender) appendNoValue(t int64, varType, varVal byte) { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go index 44029b5f..06e8df5c 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/chunkenc/xor.go @@ -173,7 +173,18 @@ func (a *xorAppender) Chunk() Chunk { func (a *xorAppender) Append(t int64, vvar interface{}) { var tDelta uint64 num := *a.samples - v := vvar.(float64) + + var v float64 + switch typedValue := vvar.(type) { + case int: + v = float64(typedValue) + case float64: + v = typedValue + default: + a.logger.Warn("Discarding sample {time: %d, value: %v}, as its value is of incompatible data type. "+ + "Reason: expected 'float' actual '%T'.", t, vvar, vvar) + return + } // Do not append if sample is too old. 
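[Editor's aside] The new switch above makes the XOR encoder tolerant of int samples, coercing them to float64 and discarding everything else with a warning instead of panicking on the type assertion. As a self-contained sketch, with illustrative names:

package chunkenc

// coerceSample converts an incoming sample value to float64 for XOR
// encoding. ok is false for values the encoder cannot store (strings,
// booleans, ...), which the appender above logs and discards.
func coerceSample(v interface{}) (val float64, ok bool) {
	switch typed := v.(type) {
	case int:
		return float64(typed), true
	case float64:
		return typed, true
	default:
		return 0, false
	}
}
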
if t < a.t { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go index 48d2bcbc..58bf00a6 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go @@ -62,16 +62,14 @@ const ( DefaultUseServerAggregateCoefficient = 3 // KV attribute names - MaxTimeAttrName = "_maxtime" - LabelSetAttrName = "_lset" - EncodingAttrName = "_enc" - OutOfOrderAttrName = "_ooo" - MetricNameAttrName = "_name" - ObjectNameAttrName = "__name" - ChunkAttrPrefix = "_v" - AggregateAttrPrefix = "_v_" - MtimeSecsAttributeName = "__mtime_secs" - MtimeNSecsAttributeName = "__mtime_nsecs" + MaxTimeAttrName = "_maxtime" + LabelSetAttrName = "_lset" + EncodingAttrName = "_enc" + OutOfOrderAttrName = "_ooo" + MetricNameAttrName = "_name" + ObjectNameAttrName = "__name" + ChunkAttrPrefix = "_v" + AggregateAttrPrefix = "_v_" PrometheusMetricNameAttribute = "__name__" @@ -123,14 +121,14 @@ type V3ioConfig struct { // V3IO TSDB connection information - web-gateway service endpoint, // TSDB data container, relative TSDB table path within the container, and // authentication credentials for the web-gateway service - WebApiEndpoint string `json:"webApiEndpoint"` + WebAPIEndpoint string `json:"webApiEndpoint"` Container string `json:"container"` TablePath string `json:"tablePath"` Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` AccessKey string `json:"accessKey,omitempty"` - HttpTimeout string `json:"httpTimeout,omitempty"` + HTTPTimeout string `json:"httpTimeout,omitempty"` // Disabled = true disables the V3IO TSDB configuration in Prometheus and // enables the internal Prometheus TSDB instead @@ -315,12 +313,11 @@ func (config V3ioConfig) String() string { config.AccessKey = "SANITIZED" } - sanitizedConfigJson, err := json.Marshal(&config) + sanitizedConfigJSON, err := json.Marshal(&config) if err == nil { - return string(sanitizedConfigJson) - } else { - return fmt.Sprintf("Unable to read config: %v", err) + return string(sanitizedConfigJSON) } + return fmt.Sprintf("Unable to read config: %v", err) } func (*V3ioConfig) merge(cfg *V3ioConfig) (*V3ioConfig, error) { @@ -440,8 +437,8 @@ func initDefaults(cfg *V3ioConfig) { cfg.DisableNginxMitigation = &defaultDisableNginxMitigation } - if cfg.WebApiEndpoint == "" { - cfg.WebApiEndpoint = os.Getenv("V3IO_API") + if cfg.WebAPIEndpoint == "" { + cfg.WebAPIEndpoint = os.Getenv("V3IO_API") } if cfg.AccessKey == "" { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go index a38c3e4b..2d50a450 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/formatters.go @@ -91,7 +91,7 @@ func (f csvFormatter) Write(out io.Writer, set utils.SeriesSet) error { } -type simpleJsonFormatter struct { +type simpleJSONFormatter struct { baseFormatter } @@ -100,7 +100,7 @@ const metricTemplate = ` "datapoints": [%s] }` -func (f simpleJsonFormatter) Write(out io.Writer, set utils.SeriesSet) error { +func (f simpleJSONFormatter) Write(out io.Writer, set utils.SeriesSet) error { firstSeries := true output := "[" diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go 
b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go index 95a19506..9c1f0923 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/formatter/type.go @@ -11,9 +11,9 @@ import ( const DefaultOutputFormat = "text" -func NewFormatter(format string, cfg *FormatterConfig) (Formatter, error) { +func NewFormatter(format string, cfg *Config) (Formatter, error) { if cfg == nil { - cfg = &FormatterConfig{TimeFormat: time.RFC3339} + cfg = &Config{TimeFormat: time.RFC3339} } switch format { case "", DefaultOutputFormat: @@ -21,7 +21,7 @@ func NewFormatter(format string, cfg *FormatterConfig) (Formatter, error) { case "csv": return csvFormatter{baseFormatter{cfg: cfg}}, nil case "json": - return simpleJsonFormatter{baseFormatter{cfg: cfg}}, nil + return simpleJSONFormatter{baseFormatter{cfg: cfg}}, nil case "none": return testFormatter{baseFormatter{cfg: cfg}}, nil @@ -34,12 +34,12 @@ type Formatter interface { Write(out io.Writer, set utils.SeriesSet) error } -type FormatterConfig struct { +type Config struct { TimeFormat string } type baseFormatter struct { - cfg *FormatterConfig + cfg *Config } func labelsToStr(labels utils.Labels) (string, string) { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go index f82ed68a..73103918 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go @@ -103,36 +103,33 @@ func (p *PartitionManager) TimeToPart(t int64) (*DBPartition, error) { // Rounding t to the nearest PartitionInterval multiple _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) return p.headPartition, err - } else { - if t >= p.headPartition.startTime { - if (t - p.headPartition.startTime) >= p.currentPartitionInterval { - _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) - if err != nil { - return nil, err - } + } + if t >= p.headPartition.startTime { + if (t - p.headPartition.startTime) >= p.currentPartitionInterval { + _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + if err != nil { + return nil, err } - return p.headPartition, nil - } else { - // Iterate backwards; ignore the last element as it's the head partition - for i := len(p.partitions) - 2; i >= 0; i-- { - if t >= p.partitions[i].startTime { - if t < p.partitions[i].GetEndTime() { - return p.partitions[i], nil - } else { - part, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) - if err != nil { - return nil, err - } - return part, nil - } - } + } + return p.headPartition, nil + } + // Iterate backwards; ignore the last element as it's the head partition + for i := len(p.partitions) - 2; i >= 0; i-- { + if t >= p.partitions[i].startTime { + if t < p.partitions[i].GetEndTime() { + return p.partitions[i], nil + } + part, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + if err != nil { + return nil, err } - head := p.headPartition - part, _ := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) - p.headPartition = head return part, nil } } + head := p.headPartition + part, _ := p.createAndUpdatePartition(p.currentPartitionInterval * (t 
/ p.currentPartitionInterval)) + p.headPartition = head + return part, nil } func (p *PartitionManager) createAndUpdatePartition(t int64) (*DBPartition, error) { @@ -198,7 +195,7 @@ func (p *PartitionManager) updateSchema() error { } input := &v3io.PutItemInput{Path: schemaFilePath, Attributes: attributes} - _, err := p.container.PutItemSync(input) + err := p.container.PutItemSync(input) if err != nil { outerError = errors.Wrap(err, "failed to update partitions table.") @@ -238,7 +235,7 @@ func (p *PartitionManager) DeletePartitionsFromSchema(partitionsToDelete []*DBPa deletePartitionExpression.WriteString(");") } expression := deletePartitionExpression.String() - _, err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) + err := p.container.UpdateItemSync(&v3io.UpdateItemInput{Path: p.GetSchemaFilePath(), Expression: &expression}) if err != nil { return err } @@ -592,33 +589,6 @@ func (p *DBPartition) Time2Bucket(t int64) int { return int((t - p.startTime) / p.rollupTime) } -// Return the start time of an aggregation bucket by id -func (p *DBPartition) GetAggregationBucketStartTime(id int) int64 { - return p.startTime + int64(id)*p.rollupTime -} - -// Return the end time of an aggregation bucket by id -func (p *DBPartition) GetAggregationBucketEndTime(id int) int64 { - return p.startTime + int64(id+1)*p.rollupTime - 1 -} - -func (p *DBPartition) Times2BucketRange(start, end int64) []int { - var buckets []int - - if start > p.GetEndTime() || end < p.startTime { - return buckets - } - - startingAggrBucket := p.Time2Bucket(start) - endAggrBucket := p.Time2Bucket(end) - - for bucketID := startingAggrBucket; bucketID <= endAggrBucket; bucketID++ { - buckets = append(buckets, bucketID) - } - - return buckets -} - // Return the nearest chunk start time for the specified time func (p *DBPartition) GetChunkMint(t int64) int64 { if t > p.GetEndTime() { @@ -641,43 +611,11 @@ func (p *DBPartition) IsAheadOfChunk(mint, t int64) bool { } // Return the ID of the chunk whose range includes the specified time -func (p *DBPartition) TimeToChunkId(tmilli int64) (int, error) { +func (p *DBPartition) TimeToChunkID(tmilli int64) (int, error) { if tmilli >= p.startTime && tmilli <= p.GetEndTime() { return int((tmilli-p.startTime)/p.chunkInterval) + 1, nil - } else { - return -1, errors.Errorf("Time %d isn't within the range of this partition.", tmilli) - } -} - -// Check if a chunk (by attribute name) is in the given time range. 
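[Editor's aside] TimeToChunkID above maps a timestamp to a 1-based chunk index inside a partition; the arithmetic in isolation, as a sketch with the partition fields passed as plain parameters:

package partmgr

import "fmt"

// chunkIDFor returns the 1-based chunk index of time t (in ms) within a
// partition spanning [startTime, endTime] that is split into
// chunkInterval-sized chunks, as in DBPartition.TimeToChunkID.
func chunkIDFor(t, startTime, endTime, chunkInterval int64) (int, error) {
	if t < startTime || t > endTime {
		return -1, fmt.Errorf("time %d isn't within the range of this partition", t)
	}
	return int((t-startTime)/chunkInterval) + 1, nil
}

For example, with a 2h chunkInterval a sample 3h past startTime lands in chunk 2; Range2Cids derives its start and end IDs from the same computation.
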
-func (p *DBPartition) IsChunkInRangeByAttr(attr string, mint, maxt int64) bool { - - // Discard '_v' prefix - chunkIDStr := attr[2:] - chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) - if err != nil { - return false } - - chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval - chunkEndTime := chunkStartTime + p.chunkInterval - 1 - - return mint <= chunkStartTime && maxt >= chunkEndTime -} - -// Get a chunk's start time by it's attribute name -func (p *DBPartition) GetChunkStartTimeByAttr(attr string) (int64, error) { - - // Discard '_v' prefix - chunkIDStr := attr[2:] - chunkID, err := strconv.ParseInt(chunkIDStr, 10, 64) - if err != nil { - return 0, err - } - - chunkStartTime := p.startTime + (chunkID-1)*p.chunkInterval - - return chunkStartTime, nil + return -1, errors.Errorf("Time %d isn't within the range of this partition.", tmilli) } // Check whether the specified time is within the range of this partition @@ -720,11 +658,11 @@ func (p *DBPartition) Range2Attrs(col string, mint, maxt int64) ([]string, int64 // Return a list of all the chunk IDs that match the specified time range func (p *DBPartition) Range2Cids(mint, maxt int64) []int { var list []int - start, err := p.TimeToChunkId(mint) + start, err := p.TimeToChunkID(mint) if err != nil { start = 1 } - end, err := p.TimeToChunkId(maxt) + end, err := p.TimeToChunkID(maxt) if err != nil { end = int(p.partitionInterval / p.chunkInterval) } @@ -740,7 +678,7 @@ func (p *DBPartition) GetHashingBuckets() int { func (p *DBPartition) ToMap() map[string]interface{} { attributes := make(map[string]interface{}, 5) - attributes["aggregates"] = aggregate.AggregateMaskToString(p.AggrType()) + attributes["aggregates"] = aggregate.MaskToString(p.AggrType()) attributes["rollupTime"] = p.rollupTime attributes["chunkInterval"] = p.chunkInterval attributes["partitionInterval"] = p.partitionInterval diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go index 3484f820..a10fae0d 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator.go @@ -49,10 +49,9 @@ func newRawChunkIterator(queryResult *qryResults, log logger.Logger) utils.Serie if len(newIterator.chunks) == 0 { // If there's no data, create a null iterator return &utils.NullSeriesIterator{} - } else { - newIterator.iter = newIterator.chunks[0].Iterator() - return &newIterator } + newIterator.iter = newIterator.chunks[0].Iterator() + return &newIterator } // Advance the iterator to the specified chunk and time diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go index ce214746..28030d2a 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/chunkIterator_test.go @@ -85,11 +85,26 @@ func (suite *testRawChunkIterSuite) TestRawChunkIteratorWithZeroValue() { prevT, prevV := iter.PeakBack() suite.Require().Equal(ingestData[index].Time, t, "current time does not match") - suite.Require().Equal(ingestData[index].Value, v, "current value does not match") + + switch val := ingestData[index].Value.(type) { + case float64: + suite.Require().Equal(val, v, "current value does not match") + case int: + 
suite.Require().Equal(float64(val), v, "current value does not match") + default: + suite.Require().Equal(val, v, "current value does not match") + } if index > 0 { suite.Require().Equal(ingestData[index-1].Time, prevT, "current time does not match") - suite.Require().Equal(ingestData[index-1].Value, prevV, "current value does not match") + switch val := ingestData[index-1].Value.(type) { + case float64: + suite.Require().Equal(val, prevV, "current value does not match") + case int: + suite.Require().Equal(float64(val), prevV, "current value does not match") + default: + suite.Require().Equal(val, prevV, "current value does not match") + } } index++ } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go index 35e95d10..ff4bb796 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/collector.go @@ -223,7 +223,7 @@ func downsampleRawData(ctx *selectQueryContext, res *qryResults, if it.Seek(currCellTime) { t, v := it.At() if t == currCellTime { - _ = res.frame.setDataAt(col.Name(), int(currCell), v) + _ = res.frame.setDataAt(col.Name(), currCell, v) } else { prevT, prevV := it.PeakBack() @@ -236,7 +236,7 @@ func downsampleRawData(ctx *selectQueryContext, res *qryResults, // Check if the interpolation was successful in terms of exceeding tolerance if !(interpolatedT == 0 && interpolatedV == 0) { - _ = res.frame.setDataAt(col.Name(), int(currCell), interpolatedV) + _ = res.frame.setDataAt(col.Name(), currCell, interpolatedV) } } } @@ -249,7 +249,10 @@ func downsampleRawData(ctx *selectQueryContext, res *qryResults, func aggregateClientAggregatesCrossSeries(ctx *selectQueryContext, res *qryResults, previousPartitionLastTime int64, previousPartitionLastValue float64) (int64, float64, error) { ctx.logger.Debug("using Client Aggregates Collector for metric %v", res.name) - it := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) + it, ok := newRawChunkIterator(res, ctx.logger).(*RawChunkIterator) + if !ok { + return previousPartitionLastTime, previousPartitionLastValue, nil + } var previousPartitionEndBucket int if previousPartitionLastTime != 0 { @@ -268,7 +271,7 @@ func aggregateClientAggregatesCrossSeries(ctx *selectQueryContext, res *qryResul if t == currBucketTime { for _, col := range res.frame.columns { if col.GetColumnSpec().metric == res.name { - _ = res.frame.setDataAt(col.Name(), int(currBucket), v) + _ = res.frame.setDataAt(col.Name(), currBucket, v) } } } else { @@ -284,7 +287,7 @@ func aggregateClientAggregatesCrossSeries(ctx *selectQueryContext, res *qryResul if col.GetColumnSpec().metric == res.name { interpolatedT, interpolatedV := col.GetInterpolationFunction()(prevT, t, currBucketTime, prevV, v) if !(interpolatedT == 0 && interpolatedV == 0) { - _ = res.frame.setDataAt(col.Name(), int(currBucket), interpolatedV) + _ = res.frame.setDataAt(col.Name(), currBucket, interpolatedV) } } } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go index 4bf573ea..a903122f 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go @@ -7,6 +7,7 @@ import ( "github.com/pkg/errors" "github.com/v3io/frames" + "github.com/v3io/frames/pb" 
"github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" @@ -22,7 +23,7 @@ type frameIterator struct { } // create new frame set iterator, frame iter has a SeriesSet interface (for Prometheus) plus columnar interfaces -func NewFrameIterator(ctx *selectQueryContext) (*frameIterator, error) { +func newFrameIterator(ctx *selectQueryContext) (*frameIterator, error) { if !ctx.isRawQuery() { for _, f := range ctx.frameList { if err := f.finishAllColumns(); err != nil { @@ -104,7 +105,7 @@ func (fi *frameIterator) Err() error { } // data frame, holds multiple value columns and an index (time) column -func NewDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Labels, hash uint64, isRawQuery bool, columnSize int, useServerAggregates, showAggregateLabel bool) (*dataFrame, error) { +func newDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Labels, hash uint64, isRawQuery bool, columnSize int, useServerAggregates, showAggregateLabel bool) (*dataFrame, error) { df := &dataFrame{lset: lset, hash: hash, isRawSeries: isRawQuery, showAggregateLabel: showAggregateLabel} // is raw query if isRawQuery { @@ -184,7 +185,7 @@ func createColumn(col columnMeta, columnSize int, useServerAggregates bool) (Col column = NewVirtualColumn(col.getColumnName(), col, columnSize, function) } } else { - column = NewDataColumn(col.getColumnName(), col, columnSize, frames.FloatType) + column = newDataColumn(col.getColumnName(), col, columnSize, frames.FloatType) } return column, nil @@ -193,9 +194,8 @@ func createColumn(col columnMeta, columnSize int, useServerAggregates bool) (Col func getAggreagteFunction(aggrType aggregate.AggrType, useServerAggregates bool) (func(interface{}, interface{}) interface{}, error) { if useServerAggregates { return aggregate.GetServerAggregationsFunction(aggrType) - } else { - return aggregate.GetClientAggregationsFunction(aggrType) } + return aggregate.GetClientAggregationsFunction(aggrType) } func fillDependantColumns(wantedColumn Column, df *dataFrame) { @@ -247,6 +247,7 @@ type dataFrame struct { index Column columnByName map[string]int // name -> index in columns nonEmptyRowsIndicators []bool + nullValuesMaps []*pb.NullValuesMap metrics map[string]struct{} metricToCountColumn map[string]Column @@ -379,19 +380,18 @@ func (d *dataFrame) Index() (Column, error) { func (d *dataFrame) TimeSeries(i int) (utils.Series, error) { if d.isRawSeries { return d.rawColumns[i], nil - } else { - currentColumn, err := d.ColumnAt(i) - if err != nil { - return nil, err - } - - return NewDataFrameColumnSeries(d.index, - currentColumn, - d.metricToCountColumn[currentColumn.GetColumnSpec().metric], - d.Labels(), - d.hash, - d.showAggregateLabel), nil } + currentColumn, err := d.ColumnAt(i) + if err != nil { + return nil, err + } + + return NewDataFrameColumnSeries(d.index, + currentColumn, + d.metricToCountColumn[currentColumn.GetColumnSpec().metric], + d.Labels(), + d.hash, + d.showAggregateLabel), nil } // Creates Frames.columns out of tsdb columns. 
@@ -413,7 +413,10 @@ func (d *dataFrame) finishAllColumns() error { case *ConcreteColumn, *dataColumn: value, err := col.getBuilder().At(i) if err != nil || value == nil { - col.getBuilder().Set(i, math.NaN()) + err := col.getBuilder().Set(i, math.NaN()) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("could not create new column at index %d", i)) + } } } } @@ -468,17 +471,19 @@ func (d *dataFrame) finishAllColumns() error { // func (d *dataFrame) rawSeriesToColumns() error { var timeData []time.Time - - columns := make([]frames.ColumnBuilder, len(d.rawColumns)) - nonExhaustedIterators := len(d.rawColumns) - seriesToDataType := make([]frames.DType, len(d.rawColumns)) - seriesTodefaultValue := make([]interface{}, len(d.rawColumns)) - currentTime := int64(math.MaxInt64) + var currentTime int64 + numberOfRawColumns := len(d.rawColumns) + columns := make([]frames.ColumnBuilder, numberOfRawColumns) + nonExhaustedIterators := numberOfRawColumns + seriesToDataType := make([]frames.DType, numberOfRawColumns) + seriesToDefaultValue := make([]interface{}, numberOfRawColumns) nextTime := int64(math.MaxInt64) - seriesHasMoreData := make([]bool, len(d.rawColumns)) - + seriesHasMoreData := make([]bool, numberOfRawColumns) emptyMetrics := make(map[int]string) + d.nullValuesMaps = make([]*pb.NullValuesMap, 0) + nullValuesRowIndex := -1 + for i, rawSeries := range d.rawColumns { if rawSeries == nil { missingColumn := "(unknown column)" @@ -511,12 +516,12 @@ func (d *dataFrame) rawSeriesToColumns() error { columns[i] = frames.NewSliceColumnBuilder(rawSeries.Labels().Get(config.PrometheusMetricNameAttribute), frames.StringType, 0) seriesToDataType[i] = frames.StringType - seriesTodefaultValue[i] = "" + seriesToDefaultValue[i] = "" } else { columns[i] = frames.NewSliceColumnBuilder(rawSeries.Labels().Get(config.PrometheusMetricNameAttribute), frames.FloatType, 0) seriesToDataType[i] = frames.FloatType - seriesTodefaultValue[i] = math.NaN() + seriesToDefaultValue[i] = math.NaN() } } @@ -525,6 +530,10 @@ func (d *dataFrame) rawSeriesToColumns() error { nextTime = int64(math.MaxInt64) timeData = append(timeData, time.Unix(currentTime/1000, (currentTime%1000)*1e6)) + // add new row to null values map + d.nullValuesMaps = append(d.nullValuesMaps, &pb.NullValuesMap{NullColumns: make(map[string]bool)}) + nullValuesRowIndex++ + for seriesIndex, rawSeries := range d.rawColumns { if rawSeries == nil { continue @@ -541,7 +550,10 @@ func (d *dataFrame) rawSeriesToColumns() error { } if t == currentTime { - columns[seriesIndex].Append(v) + e := columns[seriesIndex].Append(v) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not append value %v", v)) + } if iter.Next() { t, _ = iter.At() } else { @@ -549,7 +561,11 @@ func (d *dataFrame) rawSeriesToColumns() error { seriesHasMoreData[seriesIndex] = false } } else { - columns[seriesIndex].Append(seriesTodefaultValue[seriesIndex]) + e := columns[seriesIndex].Append(seriesToDefaultValue[seriesIndex]) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not append from default value %v", seriesToDefaultValue[seriesIndex])) + } + d.nullValuesMaps[nullValuesRowIndex].NullColumns[columns[seriesIndex].Name()] = true } if seriesHasMoreData[seriesIndex] && t < nextTime { @@ -560,10 +576,14 @@ func (d *dataFrame) rawSeriesToColumns() error { numberOfRows := len(timeData) colSpec := columnMeta{metric: "time"} - d.index = NewDataColumn("time", colSpec, numberOfRows, frames.TimeType) - d.index.SetData(timeData, numberOfRows) + d.index = newDataColumn("time", 
colSpec, numberOfRows, frames.TimeType) + e := d.index.SetData(timeData, numberOfRows) + if e != nil { + return errors.Wrap(e, fmt.Sprintf("could not set data, timeData=%v, numberOfRows=%v", timeData, numberOfRows)) + } + + d.columns = make([]Column, numberOfRawColumns) - d.columns = make([]Column, len(d.rawColumns)) for i, series := range d.rawColumns { if series == nil { continue @@ -571,7 +591,7 @@ func (d *dataFrame) rawSeriesToColumns() error { name := series.Labels().Get(config.PrometheusMetricNameAttribute) spec := columnMeta{metric: name} - col := NewDataColumn(name, spec, numberOfRows, seriesToDataType[i]) + col := newDataColumn(name, spec, numberOfRows, seriesToDataType[i]) col.framesCol = columns[i].Finish() d.columns[i] = col } @@ -583,13 +603,18 @@ func (d *dataFrame) rawSeriesToColumns() error { } for index, metricName := range emptyMetrics { spec := columnMeta{metric: metricName} - col := NewDataColumn(metricName, spec, numberOfRows, frames.FloatType) + col := newDataColumn(metricName, spec, numberOfRows, frames.FloatType) framesCol, err := frames.NewSliceColumn(metricName, nullValues) if err != nil { return errors.Wrap(err, fmt.Sprintf("could not create empty column '%v'", metricName)) } col.framesCol = framesCol d.columns[index] = col + + // mark empty columns + for i := 0; i < numberOfRows; i++ { + d.nullValuesMaps[i].NullColumns[col.name] = true + } } } @@ -614,7 +639,7 @@ func (d *dataFrame) GetFrame() (frames.Frame, error) { } } - return frames.NewFrame(framesColumns, []frames.Column{d.index.FramesColumn()}, d.Labels().Map()) + return frames.NewFrameWithNullValues(framesColumns, []frames.Column{d.index.FramesColumn()}, d.Labels().Map(), d.nullValuesMaps) } // Column object, store a single value or index column/array @@ -702,7 +727,7 @@ func (c *basicColumn) GetInterpolationFunction() InterpolationFunction { return c.interpolationFunction } -func NewDataColumn(name string, colSpec columnMeta, size int, datatype frames.DType) *dataColumn { +func newDataColumn(name string, colSpec columnMeta, size int, datatype frames.DType) *dataColumn { dc := &dataColumn{basicColumn: basicColumn{name: name, spec: colSpec, size: size, interpolationFunction: GetInterpolateFunc(colSpec.interpolationType, colSpec.interpolationTolerance), builder: frames.NewSliceColumnBuilder(name, datatype, size)}} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go index efb5fd5d..45b8724e 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/client_aggregates_integration_test.go @@ -80,7 +80,7 @@ func (suite *testClientAggregatesSuite) TestQueryAggregateWithNameWildcard() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[metricName][aggr], data, "queried data does not match expected") + suite.compareMultipleMetrics(data, expected, metricName, aggr) } assert.Equal(suite.T(), len(expectedData)*len(expected), seriesCount, "series count didn't match expected") @@ -139,7 +139,7 @@ func (suite *testClientAggregatesSuite) TestQueryAggregateWithFilterOnMetricName suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[metricName][aggr], data, "queried data does not match expected") + suite.compareMultipleMetrics(data, expected, metricName, aggr) } 
assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -195,7 +195,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesSinglePartition() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -219,8 +219,8 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { tsdbtest.TestOption{ Key: tsdbtest.OptTimeSeries, Value: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", Labels: labels1, + Name: "cpu", Data: ingestedData}, }}) tsdbtest.InsertData(suite.T(), testParams) @@ -255,7 +255,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartition() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -315,7 +315,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionNonCon suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -371,7 +371,7 @@ func (suite *testClientAggregatesSuite) TestClientAggregatesMultiPartitionOneSte suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -473,7 +473,7 @@ func (suite *testClientAggregatesSuite) TestSelectAggregatesByRequestedColumns() suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -530,7 +530,7 @@ func (suite *testClientAggregatesSuite) TestSelectAggregatesAndRawByRequestedCol suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -588,7 +588,7 @@ func (suite *testClientAggregatesSuite) TestQueryAllData() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -618,7 +618,8 @@ func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { }}) tsdbtest.InsertData(suite.T(), testParams) - expected := map[string][]tsdbtest.DataPoint{"max": {{Time: suite.basicQueryTime, Value: 40}}, + expected := map[string][]tsdbtest.DataPoint{ + "max": {{Time: suite.basicQueryTime, Value: 40}}, "min": {{Time: suite.basicQueryTime, Value: 10}}, "sum": {{Time: suite.basicQueryTime, Value: 100}}, "count": {{Time: suite.basicQueryTime, Value: 4}}, @@ -646,7 +647,9 @@ func (suite *testClientAggregatesSuite) TestAggregatesWithZeroStep() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + for i, 
dataPoint := range expected[agg] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } } assert.Equal(suite.T(), 4, seriesCount, "series count didn't match expected") @@ -698,7 +701,7 @@ func (suite *testClientAggregatesSuite) TestUsePreciseAggregationsConfig() { suite.T().Fatal(err) } - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(3, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go index 6488677c..624ec921 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/cross_series_aggregation_integration_test.go @@ -82,8 +82,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesTimesFalls if err != nil { suite.T().Fatal(err) } - - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -153,7 +152,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregates() { suite.T().Fatal(err) } - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -219,7 +218,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti suite.T().Fatal(err) } - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -285,7 +284,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterp suite.T().Fatal(err) } - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -359,8 +358,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti if err != nil { suite.T().Fatal(err) } - - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -444,8 +442,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesMultiParti if err != nil { suite.T().Fatal(err) } - - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -512,8 +509,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesWithInterp if err != nil { suite.T().Fatal(err) } - - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + 
suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -570,7 +566,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSinglePart suite.T().Fatal(err) } - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -629,8 +625,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestOnlyVirtualCrossSeriesAggregate if err != nil { suite.T().Fatal(err) } - - suite.Require().Equal(expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -699,7 +694,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesSameLabelM metricName := set.At().Labels().Get(config.PrometheusMetricNameAttribute) suite.NoError(err) - suite.Require().Equal(expected[fmt.Sprintf("%v-%v", agg, metricName)], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, fmt.Sprintf("%v-%v", agg, metricName)) } suite.Require().Equal(len(expected), seriesCount, "series count didn't match expected") @@ -768,7 +763,7 @@ func (suite *testCrossSeriesAggregatesSuite) TestCrossSeriesAggregatesDifferentL data, err := tsdbtest.IteratorToSlice(iter) suite.NoError(err) - suite.Require().Equal(expected, data, "queried data does not match expected") + suite.compareSingleMetric(data, expected) } suite.Require().Equal(2, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go index ace74820..496d7757 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/dataframe_query_integration_test.go @@ -3,6 +3,7 @@ package pqueriertest import ( + "errors" "fmt" "math" "strings" @@ -86,7 +87,17 @@ func (suite *testSelectDataframeSuite) TestAggregatesWithZeroStepSelectDataframe currentColAggregate := strings.Split(col.Name(), "(")[0] f, err := col.FloatAt(0) assert.NoError(suite.T(), err) - suite.Require().Equal(expected[currentColAggregate].Value, f) + + var expectedFloat float64 + switch val := expected[currentColAggregate].Value.(type) { + case int: + expectedFloat = float64(val) + case float64: + expectedFloat = val + default: + suite.Failf("invalid data type", "expected int or float, actual type is %t", val) + } + suite.Require().Equal(expectedFloat, f) } } @@ -208,11 +219,23 @@ func (suite *testSelectDataframeSuite) Test2Series1EmptySelectDataframe() { assert.Equal(suite.T(), len(ingestedData), col.Len()) for i := 0; i < col.Len(); i++ { currentExpected := expected[col.Name()][i].Value - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - - if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { - assert.Equal(suite.T(), currentExpected, f) + switch val := currentExpected.(type) { + case float64: + fv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(fv)) { + assert.Equal(suite.T(), currentExpected, fv) + } + 
case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + sv, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, sv) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) } } } @@ -368,11 +391,24 @@ func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetricsWithMult currentExpectedData := expectedData[fmt.Sprintf("%v-%v", col.Name(), frame.Labels()["os"])] assert.Equal(suite.T(), len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { - assert.Equal(suite.T(), currentExpected, f) + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) } } } @@ -612,11 +648,24 @@ func (suite *testSelectDataframeSuite) TestQueryDataFrameMultipleMetrics() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { - suite.Require().Equal(currentExpected, f) + switch val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) } } } @@ -699,11 +748,23 @@ func (suite *testSelectDataframeSuite) TestColumnOrder() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - - if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { - suite.Require().Equal(currentExpected, f) + switch val := currentExpected.(type) { + case float64: + fv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(fv)) { + assert.Equal(suite.T(), currentExpected, fv) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + sv, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, sv) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) } } } @@ -772,11 +833,477 @@ func (suite *testSelectDataframeSuite) TestQueryNonExistingMetric() { currentExpectedData := expectedData[col.Name()] suite.Require().Equal(len(currentExpectedData), col.Len()) currentExpected := currentExpectedData[i].Value - f, err := col.FloatAt(i) - assert.NoError(suite.T(), err) - if !(math.IsNaN(currentExpected) && math.IsNaN(f)) { - suite.Require().Equal(currentExpected, f) + switch 
val := currentExpected.(type) { + case float64: + f, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + if !(math.IsNaN(val) && math.IsNaN(f)) { + assert.Equal(suite.T(), currentExpected, f) + } + case int: + iv, err := col.FloatAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), float64(val), iv) + case string: + s, err := col.StringAt(i) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), val, s) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseStringAndNumericColumnsDataframe() { + requireCtx := suite.Require() + adapter, err := tsdb.NewV3ioAdapter(suite.v3ioConfig, nil, nil) + requireCtx.NoError(err, "failed to create v3io adapter") + + metricCpu := "cpu" + metricLog := "log" + labels := utils.LabelsFromStringList("os", "linux") + labelsWithNameLog := append(labels, utils.LabelsFromStringList("__name__", metricLog)...) + + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + + timeColumnLog := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + + dataLog := []interface{}{"a", "c", "d", "e"} + expectedColumns := map[string][]interface{}{ + metricCpu: {10.0, 20.0, 30.0, math.NaN(), 50.0}, + metricLog: {"a", "", "c", "d", "e"}} + appender, err := adapter.Appender() + requireCtx.NoError(err, "failed to create v3io appender") + + refLog, err := appender.Add(labelsWithNameLog, timeColumnLog[0], dataLog[0]) + suite.NoError(err, "failed to add data to the TSDB appender") + for i := 1; i < len(timeColumnLog); i++ { + appender.AddFast(labels, refLog, timeColumnLog[i], dataLog[i]) + } + + _, err = appender.WaitForCompletion(0) + requireCtx.NoError(err, "failed to wait for TSDB append completion") + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: metricCpu, + Labels: labels, + Data: []tsdbtest.DataPoint{ + {suite.basicQueryTime, 10.0}, + {int64(suite.basicQueryTime + tsdbtest.MinuteInMillis), 20.0}, + {suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, 30.0}, + {suite.basicQueryTime + 4*tsdbtest.MinuteInMillis, 50.0}}}, + }}) + + tsdbtest.InsertData(suite.T(), testParams) + + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: metricCpu}, {Metric: metricLog}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 5*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + 
column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[column.Name()][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + requireCtx.Equal(expectedColumns[column.Name()][i], v, "column %v does not match at index %v", column.Name(), i) + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithEmptyColumnsDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1": {10.0, 20.0, 30.0, math.NaN(), 50.0, math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2": {math.NaN(), math.NaN(), math.NaN(), 40.4, 50.5, 10.0, 20.0, math.NaN(), 40.0, 50.0}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + // NA + // NA + {expectedTimeColumn[3], 40.4}, + {expectedTimeColumn[4], 50.5}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to 
execute query") + + rowId := -1 + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + rowId++ + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[column.Name()][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[columnName][rowId] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %v", columnName, rowId) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithPartialLabelsDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1": {10.0, 20.0, 30.0, 40.0, 50.0, math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), 10.0, 20.0, math.NaN(), 40.0, 50.0}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // 
NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + rowId := -1 + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + rowId++ + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %v", i) + for _, columnName := range frame.Names() { + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[column.Name()][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[columnName][rowId] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %v", columnName, rowId) + } + } + } + } +} + +func (suite *testSelectDataframeSuite) TestSparseNumericColumnsWithNotExistingMetricDataframe() { + requireCtx := suite.Require() + labelSetLinux := utils.LabelsFromStringList("os", "linux") + labelSetWindows := utils.LabelsFromStringList("os", "windows") + expectedTimeColumn := []int64{ + suite.basicQueryTime, + suite.basicQueryTime + tsdbtest.MinuteInMillis, + suite.basicQueryTime + 2*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 3*tsdbtest.MinuteInMillis, + suite.basicQueryTime + 4*tsdbtest.MinuteInMillis} + expectedColumns := map[string][]interface{}{ + "cpu_0": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + "cpu_1": {10.0, 20.0, 30.0, 40.0, 50.0, math.NaN(), 22.0, 33.0, math.NaN(), 55.0}, + "cpu_2": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), 10.0, 20.0, math.NaN(), 40.0, 50.0}, + "fake": {math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN(), math.NaN()}, + } + + testParams := tsdbtest.NewTestParams(suite.T(), + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{ + tsdbtest.Metric{ + Name: "cpu_0", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0] - 68*tsdbtest.HoursInMillis, 10.0}, + {expectedTimeColumn[1] - 69*tsdbtest.HoursInMillis, 20.0}, + {expectedTimeColumn[2] - 70*tsdbtest.HoursInMillis, 30.0}, + {expectedTimeColumn[3] - 71*tsdbtest.HoursInMillis, 
40.0}, + {expectedTimeColumn[4] - 72*tsdbtest.HoursInMillis, 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetLinux, + Data: []tsdbtest.DataPoint{ + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + {expectedTimeColumn[2], 30.0}, + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_2", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + {expectedTimeColumn[0], 10.0}, + {expectedTimeColumn[1], 20.0}, + // NA + {expectedTimeColumn[3], 40.0}, + {expectedTimeColumn[4], 50.0}}}, + tsdbtest.Metric{ + Name: "cpu_1", + Labels: labelSetWindows, + Data: []tsdbtest.DataPoint{ // out of test's time frame + // NA + {expectedTimeColumn[1], 22.0}, + {expectedTimeColumn[2], 33.0}, + // NA + {expectedTimeColumn[4], 55.0}}}, + }}) + + adapter := tsdbtest.InsertData(suite.T(), testParams) + querierV2, err := adapter.QuerierV2() + requireCtx.NoError(err, "failed to create querier") + + params := &pquerier.SelectParams{RequestedColumns: []pquerier.RequestedColumn{{Metric: "cpu_0"}, {Metric: "cpu_1"}, {Metric: "cpu_2"}, {Metric: "fake"}}, + From: suite.basicQueryTime, To: suite.basicQueryTime + 10*tsdbtest.MinuteInMillis} + iter, err := querierV2.SelectDataFrame(params) + requireCtx.NoError(err, "failed to execute query") + + rowId := -1 + var seriesCount int + for iter.NextFrame() { + seriesCount++ + frame, err := iter.GetFrame() + requireCtx.NoError(err) + indexCol := frame.Indices()[0] + + nullValuesMap := frame.NullValuesMap() + requireCtx.NotNil(nullValuesMap, "null value map should not be empty") + + for i := 0; i < indexCol.Len(); i++ { + rowId++ + t, _ := indexCol.TimeAt(i) + timeMillis := t.UnixNano() / int64(time.Millisecond) + requireCtx.Equal(expectedTimeColumn[i], timeMillis, "time column does not match at index %d", i) + for _, columnName := range frame.Names() { + var v interface{} + column, err := frame.Column(columnName) + requireCtx.NoError(err) + if column.DType() == frames.FloatType { + v, _ = column.FloatAt(i) + if v == math.NaN() { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + bothNaN := math.IsNaN(expectedColumns[column.Name()][i].(float64)) && math.IsNaN(v.(float64)) + if bothNaN { + continue + } + } else if column.DType() == frames.StringType { + v, _ = column.StringAt(i) + if v == "" { + requireCtx.True(nullValuesMap[i].NullColumns[columnName]) + } + } else { + suite.Fail(fmt.Sprintf("column type is not as expected: %v", column.DType())) + } + + expectedValue := expectedColumns[columnName][rowId] + if !math.IsNaN(expectedValue.(float64)) || !math.IsNaN(v.(float64)) { + requireCtx.Equal(expectedValue, v, "column %v does not match at index %d", columnName, rowId) } } } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go index 72d2f7ac..e2064425 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/downsample_query_integration_test.go @@ -120,7 +120,7 @@ func (suite *testDownsampleSuite) TestRawDataSinglePartitionWithDownSample() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") + suite.compareSingleMetric(data, expectedData) } assert.Equal(suite.T(), 1, 
seriesCount, "series count didn't match expected") @@ -176,7 +176,7 @@ func (suite *testDownsampleSuite) TestRawDataDownSampleMultiPartitions() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") + suite.compareSingleMetric(data, expectedData) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go index 2d58da27..8346aea1 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/integration_test_basic_test.go @@ -66,3 +66,21 @@ func (suite *basicQueryTestSuite) TearDownTest() { tsdbtest.DeleteTSDB(suite.T(), suite.v3ioConfig) } } + +func (suite *basicQueryTestSuite) compareSingleMetric(data []tsdbtest.DataPoint, expected []tsdbtest.DataPoint) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[i]), "queried data does not match expected") + } +} + +func (suite *basicQueryTestSuite) compareSingleMetricWithAggregator(data []tsdbtest.DataPoint, expected map[string][]tsdbtest.DataPoint, agg string) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[agg][i]), "queried data does not match expected") + } +} + +func (suite *basicQueryTestSuite) compareMultipleMetrics(data []tsdbtest.DataPoint, expected map[string]map[string][]tsdbtest.DataPoint, metricName string, aggr string) { + for i, dataPoint := range data { + suite.Require().True(dataPoint.Equals(expected[metricName][aggr][i]), "queried data does not match expected") + } +} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go index 4b63277b..cb7e646f 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/query_sql_integration_test.go @@ -88,8 +88,7 @@ func (suite *testSQLSyntaxQuerySuite) TestGroupByOneLabelSinglePartition() { agg := set.At().Labels().Get(aggregate.AggregateLabel) groupByValue := set.At().Labels().Get("os") suite.Require().NoError(err) - - suite.Require().Equal(expected[groupByValue][agg], data, "queried data does not match expected") + suite.compareMultipleMetrics(data, expected, groupByValue, agg) } suite.Require().Equal(4, seriesCount, "series count didn't match expected") @@ -172,8 +171,7 @@ func (suite *testSQLSyntaxQuerySuite) TestGroupByMultipleLabelsSinglePartition() labelsStr := strings.Join(groupByValue, "-") suite.Require().NoError(err) - - suite.Require().Equal(expected[labelsStr][agg], data, "queried data does not match expected") + suite.compareMultipleMetrics(data, expected, labelsStr, agg) } suite.Require().Equal(6, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go index d7c94207..3a61864c 100644 --- 
a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/raw_query_integration_test.go @@ -3,6 +3,7 @@ package pqueriertest import ( + "errors" "fmt" "math" "testing" @@ -73,7 +74,7 @@ func (suite *testRawQuerySuite) TestRawDataSinglePartition() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") + suite.compareSingleMetric(data, expectedData) } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -129,7 +130,7 @@ func (suite *testRawQuerySuite) TestRawDataMultiplePartitions() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") + suite.compareSingleMetric(data, expectedData) } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -186,7 +187,7 @@ func (suite *testRawQuerySuite) TestFilterOnLabel() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") + suite.compareSingleMetric(data, expectedData) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -281,7 +282,9 @@ func (suite *testRawQuerySuite) TestSelectRawDataByRequestedColumns() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected, data, "queried data does not match expected") + for i, dataPoint := range expected { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -346,7 +349,9 @@ func (suite *testRawQuerySuite) TestRawDataMultipleMetrics() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData[name], data, "queried data does not match expected") + for i, dataPoint := range expectedData[name] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -492,7 +497,9 @@ func (suite *testRawQuerySuite) TestQueryMultipleMetricsWithMultipleLabelSets() suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData[fmt.Sprintf("%v-%v", name, os)], data, "queried data does not match expected") + for i, dataPoint := range expectedData[fmt.Sprintf("%v-%v", name, os)] { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -541,7 +548,7 @@ func (suite *testRawQuerySuite) TestDifferentLabelSetsInDifferentPartitions() { suite.T().Fatal(err) } - suite.Require().Equal(expected, data, "queried data does not match expected") + suite.compareSingleMetric(data, expected) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -591,7 +598,9 @@ func (suite *testRawQuerySuite) TestDifferentMetricsInDifferentPartitions() { suite.T().Fatal(err) } - suite.Require().Equal(expected, data, "queried data does not match expected") + for i, dataPoint := range expected { + suite.Require().True(dataPoint.Equals(data[i]), "queried data does not match expected") + } } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -763,7 +772,20 @@ func (suite *testRawQuerySuite) TestLoadPartitionsFromAttributes() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expectedData, data, "queried data does not match expected") + for i := 0; i < len(expectedData); i++ 
{ + assert.Equal(suite.T(), expectedData[i].Time, data[i].Time) + currentExpected := expectedData[i].Value + switch val := currentExpected.(type) { + case float64: + assert.Equal(suite.T(), val, data[i].Value) + case int: + assert.Equal(suite.T(), float64(val), data[i].Value) + case string: + assert.Equal(suite.T(), val, data[i].Value) + default: + assert.Error(suite.T(), errors.New("unsupported data type")) + } + } } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go index 811a3c1d..4c579fff 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/server_aggregates_integration_test.go @@ -75,7 +75,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartition() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -135,7 +135,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesSinglePartitionNegative suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 3, seriesCount, "series count didn't match expected") @@ -200,7 +200,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartition() { suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -263,7 +263,7 @@ func (suite *testServerAggregatesSuite) TestRawAggregatesMultiPartitionNonConcre suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), len(expected), seriesCount, "series count didn't match expected") @@ -319,7 +319,7 @@ func (suite *testServerAggregatesSuite) TestSelectServerAggregatesAndRawByReques suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 2, seriesCount, "series count didn't match expected") @@ -375,9 +375,8 @@ func (suite *testServerAggregatesSuite) TestAggregatesWithDisabledClientAggregat if err != nil { suite.T().Fatal(err) } - currentExpected, ok := expected[agg] - suite.Require().Equal(true, ok, "got unexpected aggregate result") - assert.Equal(suite.T(), currentExpected, data, "queried data does not match expected") + + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go index a8943199..be0a304d 
100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/pqueriertest/windowed_aggregation_integration_test.go @@ -80,7 +80,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowBigg suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -144,7 +144,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowSmal suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -208,7 +208,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowEqua suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -273,7 +273,7 @@ func (suite *testWindowAggregationSuite) TestClientWindowedAggregationWindowExce suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -337,7 +337,7 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowBigg suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -401,7 +401,7 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqua suite.T().Fatal(err) } - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") @@ -471,8 +471,7 @@ func (suite *testWindowAggregationSuite) TestServerWindowedAggregationWindowEqua if err != nil { suite.T().Fatal(err) } - - assert.Equal(suite.T(), expected[agg], data, "queried data does not match expected") + suite.compareSingleMetricWithAggregator(data, expected, agg) } assert.Equal(suite.T(), 1, seriesCount, "series count didn't match expected") diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go index ed221871..441242e1 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go @@ -152,7 +152,7 @@ func (q *V3ioQuerier) baseSelectQry(params *SelectParams, showAggregateLabel boo // TODO: should be checked in config if !isPowerOfTwo(q.cfg.QryWorkers) { - return nil, errors.New("Query workers num must be a power of 2 and > 0 !") + return nil, errors.New("query workers num must be a power of 2 and > 0") } // If the config is set to use only client configuration override the query parameter. 
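The querier.go hunk above tidies the error message for the QryWorkers guard; the isPowerOfTwo helper itself is outside this diff. A standard bit-trick sketch consistent with that guard (an assumption for illustration, not necessarily the repository's actual implementation): a positive power of two has exactly one set bit, so clearing the lowest set bit must yield zero.

// isPowerOfTwo is assumed here; shown only to make the guard concrete.
func isPowerOfTwo(n int) bool {
	// exactly one bit set: n > 0 and n with its lowest set bit cleared is 0
	return n > 0 && n&(n-1) == 0
}

For example, isPowerOfTwo(8) returns true, while isPowerOfTwo(0) and isPowerOfTwo(6) return false, which is what makes the "power of 2 and > 0" wording accurate.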
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go index 6e615825..6c6861c7 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go @@ -107,7 +107,7 @@ func (queryCtx *selectQueryContext) start(parts []*partmgr.DBPartition, params * queryCtx.totalColumns = queryCtx.frameList[0].Len() } - return NewFrameIterator(queryCtx) + return newFrameIterator(queryCtx) } func (queryCtx *selectQueryContext) metricsAggregatesToString(metric string) (string, bool) { @@ -307,9 +307,8 @@ func (queryCtx *selectQueryContext) processQueryResults(query *partQuery) error intEncoding, err := strconv.Atoi(encodingStr) if err != nil { return fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) - } else { - encoding = chunkenc.Encoding(intEncoding) } + encoding = chunkenc.Encoding(intEncoding) } results := qryResults{name: name, encoding: encoding, query: query, fields: query.GetFields()} @@ -341,7 +340,7 @@ func (queryCtx *selectQueryContext) processQueryResults(query *partQuery) error frame, ok := queryCtx.dataFrames[hash] if !ok { var err error - frame, err = NewDataFrame(queryCtx.columnsSpec, + frame, err = newDataFrame(queryCtx.columnsSpec, queryCtx.getOrCreateTimeColumn(), lset, hash, @@ -400,7 +399,7 @@ func (queryCtx *selectQueryContext) createColumnSpecs() ([]columnMeta, map[strin } else if queryCtx.isCrossSeriesAggregate { return nil, nil, fmt.Errorf("can not aggregate both over time and across series aggregates") } - aggr, err := aggregate.AggregateFromString(col.GetFunction()) + aggr, err := aggregate.FromString(col.GetFunction()) if err != nil { return nil, nil, err } @@ -476,11 +475,15 @@ func (queryCtx *selectQueryContext) getOrCreateTimeColumn() Column { func (queryCtx *selectQueryContext) generateTimeColumn() Column { columnMeta := columnMeta{metric: "time"} - timeColumn := NewDataColumn("time", columnMeta, queryCtx.getResultBucketsSize(), frames.TimeType) + timeColumn := newDataColumn("time", columnMeta, queryCtx.getResultBucketsSize(), frames.TimeType) i := 0 for t := queryCtx.queryParams.From; t <= queryCtx.queryParams.To; t += queryCtx.queryParams.Step { - timeColumn.SetDataAt(i, time.Unix(t/1000, (t%1000)*1e6)) - i++ + err := timeColumn.SetDataAt(i, time.Unix(t/1000, (t%1000)*1e6)) + if err != nil { + queryCtx.logger.ErrorWith(errors.Wrap(err, fmt.Sprintf("could not set data"))) + } else { + i++ + } } return timeColumn } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go index 1b3a874d..0a58d205 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/selectQueryContext_test.go @@ -163,6 +163,6 @@ func TestNegativeCreateColumnSpecs(t *testing.T) { } func toAggr(str string) aggregate.AggrType { - aggr, _ := aggregate.AggregateFromString(str) + aggr, _ := aggregate.FromString(str) return aggr } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go index 67b7a717..ccb5aefd 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go +++ 
@@ -115,7 +115,10 @@ func parseFuncExpr(expr *sqlparser.FuncExpr, destCol *RequestedColumn) error {
 		case *sqlparser.ColName:
 			destCol.Metric = sqlparser.String(innerExpr.Name)
 		case *sqlparser.FuncExpr:
-			parseFuncExpr(innerExpr, destCol)
+			err := parseFuncExpr(innerExpr, destCol)
+			if err != nil {
+				return errors.Wrap(err, fmt.Sprintf("could not parse expr"))
+			}
 		}
 	}
 
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go
index f256fc2a..6d821855 100644
--- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go
+++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/series.go
@@ -49,7 +49,13 @@ func (s *V3ioSeries) Labels() utils.Labels { return s.lset }
 // Get the unique series key for sorting
 func (s *V3ioSeries) GetKey() uint64 {
 	if s.hash == 0 {
-		s.hash = s.lset.HashWithMetricName()
+		val, err := s.lset.HashWithMetricName()
+		if err != nil {
+			s.set.logger.Error(err)
+			return 0
+		}
+		s.hash = val
+
 	}
 	return s.hash
 }
@@ -257,7 +263,7 @@ func NewAggrSeries(set *V3ioSeriesSet, aggr aggregate.AggrType) *V3ioSeries {
 
 type aggrSeriesIterator struct {
 	set      *V3ioSeriesSet
-	aggrSet  *aggregate.AggregateSet
+	aggrSet  *aggregate.Set
 	aggrType aggregate.AggrType
 	index    int
 	err      error
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go
index 16c41f7b..ab2bbbb4 100644
--- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go
+++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go
@@ -42,11 +42,11 @@ type V3ioSeriesSet struct {
 	interval     int64
 	nullSeries   bool
 	overlapWin   []int
-	aggrSeries   *aggregate.AggregateSeries
+	aggrSeries   *aggregate.Series
 	aggrIdx      int
 	canAggregate bool
 	currSeries   utils.Series
-	aggrSet      *aggregate.AggregateSet
+	aggrSet      *aggregate.Set
 	noAggrLbl    bool
 	baseTime     int64
 }
diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go
deleted file mode 100644
index dfe24a51..00000000
--- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/delete_integration_test.go
+++ /dev/null
@@ -1,1141 +0,0 @@
-// +build integration
-
-package tsdb_test
-
-import (
-	"fmt"
-	"math"
-	"path"
-	"strconv"
-	"testing"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/stretchr/testify/assert"
-	v3io "github.com/v3io/v3io-go/pkg/dataplane"
-	"github.com/v3io/v3io-tsdb/pkg/config"
-	"github.com/v3io/v3io-tsdb/pkg/pquerier"
-	.
"github.com/v3io/v3io-tsdb/pkg/tsdb" - "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" - "github.com/v3io/v3io-tsdb/pkg/utils" -) - -func timeStringToMillis(timeStr string) int64 { - ta, _ := time.Parse(time.RFC3339, timeStr) - return ta.Unix() * 1000 -} -func TestDeleteTable(t *testing.T) { - ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") - t1 := ta.Unix() * 1000 - tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") - t2 := tb.Unix() * 1000 - tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") - t3 := tc.Unix() * 1000 - td, _ := time.Parse(time.RFC3339, "now + 1w") - futurePoint := td.Unix() * 1000 - - defaultTimeMillis := timeStringToMillis("2019-07-21T00:00:00Z") - generalData := []tsdbtest.DataPoint{ - // partition 1 - // chunk a - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - // chunk b - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - // partition 2 - // chunk a - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - // chunk b - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - // partition 3 - // chunk a - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - // chunk b - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}} - partitions1StartTime := timeStringToMillis("2019-07-21T00:00:00Z") - partitions2StartTime := timeStringToMillis("2019-07-23T00:00:00Z") - partitions3StartTime := timeStringToMillis("2019-07-25T00:00:00Z") - - testCases := []struct { - desc string - deleteParams DeleteParams - data tsdbtest.TimeSeries - expectedData map[string][]tsdbtest.DataPoint - expectedPartitions []int64 - ignoreReason string - }{ - {desc: "Should delete all table by time", - deleteParams: DeleteParams{ - From: 0, - To: 9999999999999, - IgnoreErrors: true, - }, - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}, - {Time: futurePoint, Value: 555.5}}, - }}, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, - }, - {desc: "Should delete all table by deleteAll", - deleteParams: DeleteParams{ - From: 0, - To: t1, - DeleteAll: true, - IgnoreErrors: true, - }, - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, - {Time: t2, Value: 333.3}, - {Time: t3, Value: 444.4}, - {Time: futurePoint, Value: 555.5}}, - }}, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": {}}, - }, - {desc: "Should delete whole partitions", - data: 
tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions2StartTime - 1, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole partitions with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions2StartTime - 1, - Filter: "os == 'win'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-win": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-linux": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 
5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole partitions specific metrics", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions2StartTime - 1, - Metrics: []string{"cpu"}, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole partitions specific metrics with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions2StartTime - 1, - Metrics: []string{"cpu"}, - Filter: "os == 'linux'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": {{Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-win": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk-linux": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, 
Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole chunks", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + tsdbtest.HoursInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole chunks with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + tsdbtest.HoursInMillis, - Filter: "os == 'linux'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-win": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole chunks specific metrics", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + tsdbtest.HoursInMillis, - Metrics: []string{"cpu"}, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole chunks specific metrics with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + tsdbtest.HoursInMillis, - Metrics: []string{"cpu"}, - Filter: "os == 'linux'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 
4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-win": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk-linux": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - - { - desc: "Should delete partial chunk in the start", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + 4*tsdbtest.MinuteInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{ - "cpu": { - {Time: 
defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete partial chunk in the middle", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime + 3*tsdbtest.MinuteInMillis, - To: partitions1StartTime + 7*tsdbtest.MinuteInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{ - "cpu": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete partial chunk in the end", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime + 6*tsdbtest.MinuteInMillis, - To: partitions1StartTime + 11*tsdbtest.MinuteInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{ - 
"cpu": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete partial chunk with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, - Filter: "os == 'linux'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-win": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 
1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - }, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete partial chunk specific metrics", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, - Metrics: []string{"cpu"}, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 
2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete partial chunk specific metrics with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime, - To: partitions1StartTime + 6*tsdbtest.MinuteInMillis, - Metrics: []string{"cpu"}, - Filter: "os == 'linux'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-win": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk-linux": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete mixed partitions and chunks", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime + tsdbtest.HoursInMillis, - To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions3StartTime}, - }, - { - desc: "Should delete mixed partitions and chunks with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime + tsdbtest.HoursInMillis, - To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, - Filter: "os == 
'linux'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-win": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - }, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete mixed partitions and chunks specific metrics", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime + tsdbtest.HoursInMillis, - To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, - Metrics: []string{"cpu"}, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: 
defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete mixed partitions and chunks specific metrics with filter", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "cpu", - Labels: utils.LabelsFromStringList("os", "win"), - Data: generalData, - }, tsdbtest.Metric{ - Name: "disk", - Labels: utils.LabelsFromStringList("os", "linux"), - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime + tsdbtest.HoursInMillis, - To: partitions3StartTime + 6*tsdbtest.MinuteInMillis, - Metrics: []string{"cpu"}, - Filter: "os == 'linux'", - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu-linux": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "cpu-win": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: 
defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}, - "disk-linux": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete partially last chunk and update max time", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions3StartTime + 1*tsdbtest.HoursInMillis + 6*tsdbtest.MinuteInMillis, - To: partitions3StartTime + 1*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole last chunk and update max time", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, 
- deleteParams: DeleteParams{ - From: partitions3StartTime + 1*tsdbtest.HoursInMillis, - To: partitions3StartTime + 2*tsdbtest.HoursInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - { - desc: "Should delete whole all samples in chunk but time range is not bigger then chunk", - data: tsdbtest.TimeSeries{tsdbtest.Metric{ - Name: "cpu", - Data: generalData, - }}, - deleteParams: DeleteParams{ - From: partitions1StartTime + 1*tsdbtest.HoursInMillis + 2*tsdbtest.MinuteInMillis, - To: partitions1StartTime + 2*tsdbtest.HoursInMillis + 11*tsdbtest.MinuteInMillis, - }, - expectedData: map[string][]tsdbtest.DataPoint{"cpu": { - {Time: defaultTimeMillis, Value: 1.2}, - {Time: defaultTimeMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 2*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}, - - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.3}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.4}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 5*tsdbtest.MinuteInMillis, Value: 1.2}, - {Time: defaultTimeMillis + 4*tsdbtest.DaysInMillis + 1*tsdbtest.HoursInMillis + 10*tsdbtest.MinuteInMillis, Value: 1.3}}}, - expectedPartitions: []int64{partitions1StartTime, partitions2StartTime, partitions3StartTime}, - }, - } - - for _, test := range testCases { - t.Run(test.desc, func(t *testing.T) { - if test.ignoreReason != "" { - t.Skip(test.ignoreReason) - } - testDeleteTSDBCase(t, - tsdbtest.NewTestParams(t, - tsdbtest.TestOption{ - Key: tsdbtest.OptDropTableOnTearDown, - Value: !test.deleteParams.DeleteAll}, - tsdbtest.TestOption{ - Key: tsdbtest.OptTimeSeries, - Value: 
test.data}, - ), test.deleteParams, test.expectedData, test.expectedPartitions) - }) - } -} - -func getCurrentPartitions(test *testing.T, container v3io.Container, path string) []int64 { - input := &v3io.GetItemInput{Path: path + "/.schema", - AttributeNames: []string{"*"}} - res, err := container.GetItemSync(input) - if err != nil { - test.Fatal(errors.Wrap(err, "failed to get schema")) - } - output := res.Output.(*v3io.GetItemOutput) - var partitions []int64 - for part := range output.Item { - partitionsStartTime, _ := strconv.ParseInt(part[1:], 10, 64) // parse attribute and discard attribute prefix - partitions = append(partitions, partitionsStartTime) - } - return partitions -} - -func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteParams DeleteParams, - expectedData map[string][]tsdbtest.DataPoint, expectedPartitions []int64) { - - adapter, teardown := tsdbtest.SetUpWithData(test, testParams) - defer teardown() - - container, err := utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HttpTimeout) - if err != nil { - test.Fatalf("failed to create new container. reason: %s", err) - } - - if err := adapter.DeleteDB(deleteParams); err != nil { - test.Fatalf("Failed to delete DB. reason: %s", err) - } - - if !deleteParams.DeleteAll { - actualPartitions := getCurrentPartitions(test, container, testParams.V3ioConfig().TablePath) - assert.ElementsMatch(test, expectedPartitions, actualPartitions, "remaining partitions are not as expected") - - qry, err := adapter.QuerierV2() - if err != nil { - test.Fatalf("Failed to create Querier. reason: %v", err) - } - - params := &pquerier.SelectParams{ - From: 0, - To: math.MaxInt64, - Filter: "1==1", - } - set, err := qry.Select(params) - if err != nil { - test.Fatalf("Failed to run Select. reason: %v", err) - } - - for set.Next() { - series := set.At() - labels := series.Labels() - osLabel := labels.Get("os") - metricName := labels.Get(config.PrometheusMetricNameAttribute) - iter := series.Iterator() - if iter.Err() != nil { - test.Fatalf("Failed to query data series. reason: %v", iter.Err()) - } - - actual, err := iteratorToSlice(iter) - if err != nil { - test.Fatal(err) - } - expectedDataKey := metricName - if osLabel != "" { - expectedDataKey = fmt.Sprintf("%v-%v", expectedDataKey, osLabel) - } - - assert.ElementsMatch(test, expectedData[expectedDataKey], actual, - "result data for '%v' didn't match, expected: %v\n actual: %v\n", expectedDataKey, expectedData[expectedDataKey], actual) - - } - if set.Err() != nil { - test.Fatalf("Failed to query metric. 
reason: %v", set.Err()) - } - } else { - container, tablePath := adapter.GetContainer() - tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) - - // Validate: schema does not exist - _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) - if err != nil { - if utils.IsNotExistsError(err) { - // OK - expected - } else { - test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) - } - } - - // Validate: table does not exist - _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) - if err != nil { - if utils.IsNotExistsError(err) { - // OK - expected - } else { - test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) - } - } - } -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go index 7f1a498b..1bffbac5 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go @@ -54,7 +54,7 @@ func newSchema(samplesIngestionRate, aggregationGranularity, aggregatesList stri parsedCrossLabelSets := aggregate.ParseCrossLabelSets(crossLabelSets) if len(parsedCrossLabelSets) > 0 && len(aggregates) == 0 { - return nil, errors.New("Cross label aggregations must be used in conjunction with aggregations.") + return nil, errors.New("Cross label aggregations must be used in conjunction with aggregations") } if len(aggregates) == 0 { @@ -120,13 +120,13 @@ func calculatePartitionAndChunkInterval(rateInHours, minChunkSize, maxChunkSize, chunkInterval := maxNumberOfEventsPerChunk / rateInHours if chunkInterval == 0 { - return "", "", fmt.Errorf("The samples ingestion rate (%v/h) is too high.", rateInHours) + return "", "", fmt.Errorf("the samples ingestion rate (%v/h) is too high", rateInHours) } // Make sure the expected chunk size is greater then the supported minimum. if chunkInterval < minNumberOfEventsPerChunk/rateInHours { return "", "", fmt.Errorf( - "The calculated chunk size is smaller than the minimum: samples ingestion rate = %v/h, calculated chunk interval = %v, minimum size = %v", + "the calculated chunk size is smaller than the minimum: samples ingestion rate = %v/h, calculated chunk interval = %v, minimum size = %v", rateInHours, chunkInterval, minChunkSize) } @@ -137,7 +137,7 @@ func calculatePartitionAndChunkInterval(rateInHours, minChunkSize, maxChunkSize, numberOfChunksInPartition += 24 } if numberOfChunksInPartition == 0 { - return "", "", errors.Errorf("The samples ingestion rate (%v/h) is too high - cannot fit a partition in a day interval with the calculated chunk size (%v).", rateInHours, chunkInterval) + return "", "", errors.Errorf("the samples ingestion rate (%v/h) is too high - cannot fit a partition in a day interval with the calculated chunk size (%v)", rateInHours, chunkInterval) } partitionInterval := numberOfChunksInPartition * chunkInterval @@ -145,7 +145,7 @@ func calculatePartitionAndChunkInterval(rateInHours, minChunkSize, maxChunkSize, } func rateToHours(samplesIngestionRate string) (int, error) { - parsingError := errors.New(`Invalid samples ingestion rate. The rate must be of the format "[0-9]+/[smh]". For example, "12/m".`) + parsingError := errors.New(`Invalid samples ingestion rate. The rate must be of the format "[0-9]+/[smh]". 
For example, "12/m"`) if len(samplesIngestionRate) < 3 { return 0, parsingError @@ -162,7 +162,7 @@ func rateToHours(samplesIngestionRate string) (int, error) { return 0, errors.Wrap(err, parsingError.Error()) } if i <= 0 { - return 0, fmt.Errorf("Invalid samples ingestion rate (%s). The rate cannot have a negative number of samples.", samplesIngestionRate) + return 0, fmt.Errorf("invalid samples ingestion rate (%s). The rate cannot have a negative number of samples", samplesIngestionRate) } switch last { case 's': @@ -184,13 +184,13 @@ func validateAggregatesGranularity(aggregationGranularity string, partitionInter } if dayMillis%duration != 0 && duration%dayMillis != 0 { - return errors.New("The aggregation granularity should be a divisor or a dividend of 1 day. Examples: \"10m\"; \"30m\"; \"2h\".") + return errors.New("the aggregation granularity should be a divisor or a dividend of 1 day. Examples: \"10m\"; \"30m\"; \"2h\"") } if hasAggregates { partitionIntervalDuration, _ := utils.Str2duration(partitionInterval) // safe to ignore error since we create 'partitionInterval' if partitionIntervalDuration/duration > MaxV3ioArraySize { - return errors.New("The aggregation granularity is too close to the ingestion rate provided. Try increasing the granularity to get an aggregation performance impact.") + return errors.New("the aggregation granularity is too close to the ingestion rate provided. Try increasing the granularity to get an aggregation performance impact") } } return nil diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go index fcf1aed3..58b259c8 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/config.go @@ -10,6 +10,8 @@ import ( ) const TsdbDefaultTestConfigPath = "testdata" + +// nolint: deadcode,varcheck const relativeProjectPath = "src/github.com/v3io/v3io-tsdb" /* diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go index 4cfc9a6a..1d3b344d 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/tsdbtest.go @@ -13,6 +13,8 @@ import ( "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/pquerier" + // nolint: golint . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" "github.com/v3io/v3io-tsdb/pkg/utils" @@ -24,12 +26,59 @@ const DaysInMillis = 24 * HoursInMillis type DataPoint struct { Time int64 - Value float64 + Value interface{} +} + +func (dp DataPoint) Equals(other DataPoint) bool { + if &dp.Time != &other.Time { + return true + } + if dp.Time != other.Time { + return false + } + + switch dpVal := dp.Value.(type) { + case float64: + switch oVal := other.Value.(type) { + case float64: + return dpVal == oVal + case int: + return dpVal == float64(oVal) + default: + return false + } + case int: + switch oVal := other.Value.(type) { + case float64: + return float64(dpVal) == oVal + case int: + return dpVal == oVal + default: + return false + } + case string: + switch oVal := other.Value.(type) { + case string: + return oVal == dpVal + case float64: + soVal := fmt.Sprintf("%f", oVal) + return dpVal == soVal + case int: + soVal := fmt.Sprintf("%d", oVal) + return dpVal == soVal + default: + return false + } + default: + return false + } } + type Metric struct { - Name string - Labels utils.Labels - Data []DataPoint + Name string + Labels utils.Labels + Data []DataPoint + ExpectedCount *int } type TimeSeries []Metric @@ -100,7 +149,8 @@ func DeleteTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { t.Fatalf("Failed to create an adapter. Reason: %s", err) } - if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { + now := time.Now().Unix() * 1000 // Current time (now) in milliseconds + if err := adapter.DeleteDB(true, true, 0, now); err != nil { t.Fatalf("Failed to delete a TSDB instance (table) on teardown. Reason: %s", err) } } @@ -112,8 +162,8 @@ func CreateTestTSDB(t testing.TB, v3ioConfig *config.V3ioConfig) { func CreateTestTSDBWithAggregates(t testing.TB, v3ioConfig *config.V3ioConfig, aggregates string) { schema := testutils.CreateSchema(t, aggregates) if err := CreateTSDB(v3ioConfig, schema); err != nil { - v3ioConfigAsJson, _ := json2.MarshalIndent(v3ioConfig, "", " ") - t.Fatalf("Failed to create a TSDB instance (table). Reason: %v\nConfiguration:\n%s", err, string(v3ioConfigAsJson)) + v3ioConfigAsJSON, _ := json2.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create a TSDB instance (table). Reason: %v\nConfiguration:\n%s", err, string(v3ioConfigAsJSON)) } } @@ -126,7 +176,13 @@ func tearDown(t testing.TB, v3ioConfig *config.V3ioConfig, testParams TestParams func SetUp(t testing.TB, testParams TestParams) func() { v3ioConfig := testParams.V3ioConfig() - v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) + + if overrideTableName, ok := testParams["override_test_name"]; ok { + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%v", overrideTableName)) + } else { + v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) + } + CreateTestTSDB(t, v3ioConfig) // Measure performance @@ -134,9 +190,11 @@ func SetUp(t testing.TB, testParams TestParams) func() { if err != nil { t.Fatalf("Unable to initialize the performance metrics reporter. 
Reason: %v", err) } + // nolint: errcheck metricReporter.Start() return func() { + // nolint: errcheck defer metricReporter.Stop() tearDown(t, v3ioConfig, testParams) } @@ -152,8 +210,8 @@ func SetUpWithDBConfig(t *testing.T, schema *config.Schema, testParams TestParam v3ioConfig := testParams.V3ioConfig() v3ioConfig.TablePath = PrefixTablePath(fmt.Sprintf("%s-%d", t.Name(), time.Now().Nanosecond())) if err := CreateTSDB(v3ioConfig, schema); err != nil { - v3ioConfigAsJson, _ := json2.MarshalIndent(v3ioConfig, "", " ") - t.Fatalf("Failed to create a TSDB instance (table). Reason: %s\nConfiguration:\n%s", err, string(v3ioConfigAsJson)) + v3ioConfigAsJSON, _ := json2.MarshalIndent(v3ioConfig, "", " ") + t.Fatalf("Failed to create a TSDB instance (table). Reason: %s\nConfiguration:\n%s", err, string(v3ioConfigAsJSON)) } // Measure performance @@ -161,9 +219,11 @@ func SetUpWithDBConfig(t *testing.T, schema *config.Schema, testParams TestParam if err != nil { t.Fatalf("Unable to initialize the performance metrics reporter. Error: %v", err) } + // nolint: errcheck metricReporter.Start() return func() { + // nolint: errcheck defer metricReporter.Stop() tearDown(t, v3ioConfig, testParams) } @@ -192,7 +252,10 @@ func InsertData(t *testing.T, testParams TestParams) *V3ioAdapter { t.Fatalf("Failed to add data to the TSDB appender. Reason: %s", err) } for _, curr := range metric.Data[1:] { - appender.AddFast(labels, ref, curr.Time, curr.Value) + err := appender.AddFast(labels, ref, curr.Time, curr.Value) + if err != nil { + t.Fatalf("Failed to AddFast. Reason: %s", err) + } } if _, err := appender.WaitForCompletion(0); err != nil { @@ -216,12 +279,17 @@ func ValidateCountOfSamples(t testing.TB, adapter *V3ioAdapter, metricName strin stepSize = queryAggStep } - qry, err := adapter.Querier(nil, startTimeMs-stepSize, endTimeMs) + qry, err := adapter.QuerierV2() if err != nil { t.Fatal(err, "Failed to create a Querier instance.") } - set, err := qry.Select("", "count", stepSize, fmt.Sprintf("starts(__name__, '%v')", metricName)) + selectParams := &pquerier.SelectParams{From: startTimeMs - stepSize, + To: endTimeMs, + Functions: "count", + Step: stepSize, + Filter: fmt.Sprintf("starts(__name__, '%v')", metricName)} + set, _ := qry.Select(selectParams) var actualCount int for set.Next() { @@ -258,11 +326,11 @@ func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, star t.Fatal(err, "Failed to create a Querier instance.") } - set, err := qry.Select(metricName, "", 0, "") + set, _ := qry.Select(metricName, "", 0, "") for set.Next() { // Start over for each label set - var lastDataPoint = &DataPoint{Time: -1, Value: -1.0} + var lastDataPoint *DataPoint if set.Err() != nil { t.Fatal(set.Err(), "Failed to get the next element from a result set.") @@ -277,12 +345,16 @@ func ValidateRawData(t testing.TB, adapter *V3ioAdapter, metricName string, star currentTime, currentValue := iter.At() currentDataPoint := &DataPoint{Time: currentTime, Value: currentValue} - if lastDataPoint.Value >= 0 { - // Note: We cast float to integer to eliminate the risk of a - // precision error - if !isValid(lastDataPoint, currentDataPoint) { - t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", - metricName, lastDataPoint, currentDataPoint) + if lastDataPoint != nil { + switch dataType := lastDataPoint.Value.(type) { + case string, float64, int, int64: + // Note: We cast float to integer to eliminate the risk of a precision error + if !isValid(lastDataPoint, 
currentDataPoint) { + t.Fatalf("The raw-data consistency check failed: metric name='%s'\n\tisValid(%v, %v) == false", + metricName, lastDataPoint, currentDataPoint) + } + default: + t.Fatalf("Got value of unsupported data type: %T", dataType) } } lastDataPoint = currentDataPoint diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go index 2a9b79d7..cce67c15 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go @@ -22,24 +22,18 @@ package tsdb import ( "context" - "encoding/base64" "encoding/json" "fmt" "math" pathUtil "path" "path/filepath" - "strconv" - "strings" - "sync" "time" "github.com/nuclio/logger" "github.com/pkg/errors" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-go/pkg/dataplane/http" - "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/appender" - "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" "github.com/v3io/v3io-tsdb/pkg/partmgr" "github.com/v3io/v3io-tsdb/pkg/pquerier" @@ -48,38 +42,22 @@ import ( "github.com/v3io/v3io-tsdb/pkg/utils" ) -const ( - defaultHttpTimeout = 30 * time.Second - - errorCodeString = "ErrorCode" - falseConditionOuterErrorCode = "184549378" // todo: change codes - falseConditionInnerErrorCode = "385876025" - maxExpressionsInUpdateItem = 1500 // max is 2000, we're taking a buffer since it doesn't work with 2000 -) +const defaultHTTPTimeout = 30 * time.Second type V3ioAdapter struct { startTimeMargin int64 logger logger.Logger container v3io.Container - HttpTimeout time.Duration + HTTPTimeout time.Duration MetricsCache *appender.MetricsCache cfg *config.V3ioConfig partitionMngr *partmgr.PartitionManager } -type DeleteParams struct { - Metrics []string - Filter string - From, To int64 - DeleteAll bool - - IgnoreErrors bool -} - func CreateTSDB(cfg *config.V3ioConfig, schema *config.Schema) error { lgr, _ := utils.NewLogger(cfg.LogLevel) - httpTimeout := parseHttpTimeout(cfg, lgr) + httpTimeout := parseHTTPTimeout(cfg, lgr) container, err := utils.CreateContainer(lgr, cfg, httpTimeout) if err != nil { return errors.Wrap(err, "Failed to create a data container.") @@ -101,23 +79,21 @@ func CreateTSDB(cfg *config.V3ioConfig, schema *config.Schema) error { err = container.PutObjectSync(&v3io.PutObjectInput{Path: path, Body: data, DataPlaneInput: dataPlaneInput}) if err != nil { - return errors.Wrapf(err, "Failed to create a TSDB schema at path '%s/%s/%s'.", cfg.WebApiEndpoint, cfg.Container, path) + return errors.Wrapf(err, "Failed to create a TSDB schema at path '%s/%s/%s'.", cfg.WebAPIEndpoint, cfg.Container, path) } return err } -func parseHttpTimeout(cfg *config.V3ioConfig, logger logger.Logger) time.Duration { - if cfg.HttpTimeout == "" { - return defaultHttpTimeout - } else { - timeout, err := time.ParseDuration(cfg.HttpTimeout) - if err != nil { - logger.Warn("Failed to parse httpTimeout '%s'. Defaulting to %d millis.", cfg.HttpTimeout, defaultHttpTimeout/time.Millisecond) - return defaultHttpTimeout - } else { - return timeout - } +func parseHTTPTimeout(cfg *config.V3ioConfig, logger logger.Logger) time.Duration { + if cfg.HTTPTimeout == "" { + return defaultHTTPTimeout } + timeout, err := time.ParseDuration(cfg.HTTPTimeout) + if err != nil { + logger.Warn("Failed to parse httpTimeout '%s'. 
Defaulting to %d millis.", cfg.HTTPTimeout, defaultHTTPTimeout/time.Millisecond) + return defaultHTTPTimeout + } + return timeout } // Create a new TSDB adapter, similar to Prometheus TSDB adapter but with a few @@ -136,12 +112,12 @@ func NewV3ioAdapter(cfg *config.V3ioConfig, container v3io.Container, logger log } } - newV3ioAdapter.HttpTimeout = parseHttpTimeout(cfg, logger) + newV3ioAdapter.HTTPTimeout = parseHTTPTimeout(cfg, logger) if container != nil { newV3ioAdapter.container = container } else { - newV3ioAdapter.container, err = utils.CreateContainer(newV3ioAdapter.logger, cfg, newV3ioAdapter.HttpTimeout) + newV3ioAdapter.container, err = utils.CreateContainer(newV3ioAdapter.logger, cfg, newV3ioAdapter.HTTPTimeout) if err != nil { return nil, errors.Wrap(err, "Failed to create V3IO data container") } @@ -152,13 +128,13 @@ func NewV3ioAdapter(cfg *config.V3ioConfig, container v3io.Container, logger log return &newV3ioAdapter, err } -func NewContainer(v3ioUrl string, numWorkers int, accessKey string, username string, password string, containerName string, logger logger.Logger) (v3io.Container, error) { +func NewContainer(v3ioURL string, numWorkers int, accessKey string, username string, password string, containerName string, logger logger.Logger) (v3io.Container, error) { ctx, err := v3iohttp.NewContext(logger, v3iohttp.NewDefaultClient(), &v3io.NewContextInput{NumWorkers: numWorkers}) if err != nil { return nil, err } - session, err := ctx.NewSession(&v3io.NewSessionInput{URL: v3ioUrl, Username: username, Password: password, AccessKey: accessKey}) + session, err := ctx.NewSession(&v3io.NewSessionInput{URL: v3ioURL, Username: username, Password: password, AccessKey: accessKey}) if err != nil { return nil, errors.Wrap(err, "Failed to create session.") } @@ -184,14 +160,13 @@ func (a *V3ioAdapter) GetContainer() (v3io.Container, string) { func (a *V3ioAdapter) connect() error { - fullpath := fmt.Sprintf("%s/%s/%s", a.cfg.WebApiEndpoint, a.cfg.Container, a.cfg.TablePath) + fullpath := fmt.Sprintf("%s/%s/%s", a.cfg.WebAPIEndpoint, a.cfg.Container, a.cfg.TablePath) resp, err := a.container.GetObjectSync(&v3io.GetObjectInput{Path: pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName)}) if err != nil { if utils.IsNotExistsError(err) { return errors.Errorf("No TSDB schema file found at '%s'.", fullpath) - } else { - return errors.Wrapf(err, "Failed to read a TSDB schema from '%s'.", fullpath) } + return errors.Wrapf(err, "Failed to read a TSDB schema from '%s'.", fullpath) } tableSchema := config.Schema{} @@ -241,7 +216,7 @@ func (a *V3ioAdapter) Appender() (Appender, error) { } func (a *V3ioAdapter) StartTime() (int64, error) { - startTime := int64(time.Now().Unix() * 1000) + startTime := time.Now().Unix() * 1000 return startTime - 1000*3600*24*1000, nil // TODO: from config or DB w default } @@ -262,55 +237,59 @@ func (a *V3ioAdapter) QuerierV2() (*pquerier.V3ioQuerier, error) { return pquerier.NewV3ioQuerier(a.container, a.logger, a.cfg, a.partitionMngr), nil } -// Delete by time range can optionally specify metrics and filter by labels -func (a *V3ioAdapter) DeleteDB(deleteParams DeleteParams) error { - if deleteParams.DeleteAll { +func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64, toTime int64) error { + if deleteAll { // Ignore time boundaries - deleteParams.From = 0 - deleteParams.To = math.MaxInt64 - } else { - if deleteParams.To == 0 { - deleteParams.To = time.Now().Unix() * 1000 - } + fromTime = 0 + toTime = math.MaxInt64 } - // Delete Data - 
err := a.DeletePartitionsData(&deleteParams) + partitions := a.partitionMngr.PartsForRange(fromTime, toTime, false) + for _, part := range partitions { + a.logger.Info("Deleting partition '%s'.", part.GetTablePath()) + err := utils.DeleteTable(a.logger, a.container, part.GetTablePath(), "", a.cfg.QryWorkers) + if err != nil && !ignoreErrors { + return errors.Wrapf(err, "Failed to delete partition '%s'.", part.GetTablePath()) + } + // Delete the Directory object + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: part.GetTablePath()}) + if err != nil && !ignoreErrors { + return errors.Wrapf(err, "Failed to delete partition object '%s'.", part.GetTablePath()) + } + } + err := a.partitionMngr.DeletePartitionsFromSchema(partitions) if err != nil { return err } - // If no data is left, delete Names folder if len(a.partitionMngr.GetPartitionsPaths()) == 0 { path := filepath.Join(a.cfg.TablePath, config.NamesDirectory) + "/" // Need a trailing slash a.logger.Info("Delete metric names at path '%s'.", path) err := utils.DeleteTable(a.logger, a.container, path, "", a.cfg.QryWorkers) - if err != nil && !deleteParams.IgnoreErrors { + if err != nil && !ignoreErrors { return errors.Wrap(err, "Failed to delete the metric-names table.") } // Delete the Directory object err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) - if err != nil && !deleteParams.IgnoreErrors { + if err != nil && !ignoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) } } } - - // If need to 'deleteAll', delete schema + TSDB table folder - if deleteParams.DeleteAll { + if deleteAll { // Delete Schema file schemaPath := pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName) a.logger.Info("Delete the TSDB configuration at '%s'.", schemaPath) err := a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: schemaPath}) - if err != nil && !deleteParams.IgnoreErrors { + if err != nil && !ignoreErrors { return errors.New("The configuration at '" + schemaPath + "' cannot be deleted or doesn't exist.") } // Delete the Directory object path := a.cfg.TablePath + "/" err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) - if err != nil && !deleteParams.IgnoreErrors { + if err != nil && !ignoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) } @@ -320,457 +299,6 @@ func (a *V3ioAdapter) DeleteDB(deleteParams DeleteParams) error { return nil } -func (a *V3ioAdapter) DeletePartitionsData(deleteParams *DeleteParams) error { - partitions := a.partitionMngr.PartsForRange(deleteParams.From, deleteParams.To, true) - var entirelyDeletedPartitions []*partmgr.DBPartition - - deleteWholePartition := deleteParams.DeleteAll || (deleteParams.Filter == "" && len(deleteParams.Metrics) == 0) - - fileToDeleteChan := make(chan v3io.Item, 1024) - getItemsTerminationChan := make(chan error, len(partitions)) - deleteTerminationChan := make(chan error, a.cfg.Workers) - numOfGetItemsRoutines := len(partitions) - if len(deleteParams.Metrics) > 0 { - numOfGetItemsRoutines = numOfGetItemsRoutines * len(deleteParams.Metrics) - } - goRoutinesNum := numOfGetItemsRoutines + a.cfg.Workers - onErrorTerminationChannel := make(chan struct{}, goRoutinesNum) - systemAttributesToFetch := []string{config.ObjectNameAttrName, config.MtimeSecsAttributeName, config.MtimeNSecsAttributeName, config.EncodingAttrName, config.MaxTimeAttrName} - var getItemsWorkers, getItemsTerminated, deletesTerminated int - 
- var getItemsWG sync.WaitGroup - getItemsErrorChan := make(chan error, numOfGetItemsRoutines) - - aggregates := a.GetSchema().PartitionSchemaInfo.Aggregates - hasServerSideAggregations := len(aggregates) != 1 || aggregates[0] != "" - - var aggrMask aggregate.AggrType - var err error - if hasServerSideAggregations { - aggrMask, _, err = aggregate.AggregatesFromStringListWithCount(aggregates) - if err != nil { - return err - } - } - - for i := 0; i <= a.cfg.Workers; i++ { - go deleteObjectWorker(a.container, deleteParams, a.logger, - fileToDeleteChan, deleteTerminationChan, onErrorTerminationChannel, - aggrMask) - } - - for _, part := range partitions { - partitionEntirelyInRange := deleteParams.From <= part.GetStartTime() && deleteParams.To >= part.GetEndTime() - deleteEntirePartitionFolder := partitionEntirelyInRange && deleteWholePartition - - // Delete all files in partition folder and then delete the folder itself - if deleteEntirePartitionFolder { - a.logger.Info("Deleting entire partition '%s'.", part.GetTablePath()) - - getItemsWG.Add(1) - go deleteEntirePartition(a.logger, a.container, part.GetTablePath(), a.cfg.QryWorkers, - &getItemsWG, getItemsErrorChan) - - entirelyDeletedPartitions = append(entirelyDeletedPartitions, part) - // First get all items based on filter+metric+time range then delete what is necessary - } else { - a.logger.Info("Deleting partial partition '%s'.", part.GetTablePath()) - - start, end := deleteParams.From, deleteParams.To - - // Round the start and end times to the nearest aggregation buckets - to later on recalculate server side aggregations - if hasServerSideAggregations { - start = part.GetAggregationBucketStartTime(part.Time2Bucket(deleteParams.From)) - end = part.GetAggregationBucketEndTime(part.Time2Bucket(deleteParams.To)) - } - - var chunkAttributesToFetch []string - - // If we don't want to delete the entire object, fetch also the desired chunks to delete. - if !partitionEntirelyInRange { - chunkAttributesToFetch, _ = part.Range2Attrs("v", start, end) - } - - allAttributes := append(chunkAttributesToFetch, systemAttributesToFetch...) - if len(deleteParams.Metrics) == 0 { - getItemsWorkers++ - input := &v3io.GetItemsInput{Path: part.GetTablePath(), - AttributeNames: allAttributes, - Filter: deleteParams.Filter} - go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) - } else { - for _, metric := range deleteParams.Metrics { - for _, shardingKey := range part.GetShardingKeys(metric) { - getItemsWorkers++ - input := &v3io.GetItemsInput{Path: part.GetTablePath(), - AttributeNames: allAttributes, - Filter: deleteParams.Filter, - ShardingKey: shardingKey} - go getItemsWorker(a.logger, a.container, input, part, fileToDeleteChan, getItemsTerminationChan, onErrorTerminationChannel) - } - } - } - } - } - a.logger.Debug("issued %v getItems", getItemsWorkers) - - // Waiting fot deleting of full partitions - getItemsWG.Wait() - select { - case err = <-getItemsErrorChan: - fmt.Println("got error", err) - // Signal all other goroutines to quite - for i := 0; i < goRoutinesNum; i++ { - onErrorTerminationChannel <- struct{}{} - } - return err - default: - } - - if getItemsWorkers != 0 { - for deletesTerminated < a.cfg.Workers { - select { - case err := <-getItemsTerminationChan: - a.logger.Debug("finished getItems worker, total finished: %v, error: %v", getItemsTerminated+1, err) - if err != nil { - // If requested to ignore non-existing tables do not return error. 
- if !(deleteParams.IgnoreErrors && utils.IsNotExistsOrConflictError(err)) { - for i := 0; i < goRoutinesNum; i++ { - onErrorTerminationChannel <- struct{}{} - } - return errors.Wrapf(err, "GetItems failed during recursive delete.") - } - } - getItemsTerminated++ - - if getItemsTerminated == getItemsWorkers { - close(fileToDeleteChan) - } - case err := <-deleteTerminationChan: - a.logger.Debug("finished delete worker, total finished: %v, err: %v", deletesTerminated+1, err) - if err != nil { - for i := 0; i < goRoutinesNum; i++ { - onErrorTerminationChannel <- struct{}{} - } - return errors.Wrapf(err, "Delete failed during recursive delete.") - } - deletesTerminated++ - } - } - } else { - close(fileToDeleteChan) - } - - a.logger.Debug("finished deleting data, removing partitions from schema") - err = a.partitionMngr.DeletePartitionsFromSchema(entirelyDeletedPartitions) - if err != nil { - return err - } - - return nil -} - -func deleteEntirePartition(logger logger.Logger, container v3io.Container, partitionPath string, workers int, - wg *sync.WaitGroup, errChannel chan<- error) { - defer wg.Done() - - err := utils.DeleteTable(logger, container, partitionPath, "", workers) - if err != nil { - errChannel <- errors.Wrapf(err, "Failed to delete partition '%s'.", partitionPath) - } - // Delete the Directory object - err = container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: partitionPath}) - if err != nil { - errChannel <- errors.Wrapf(err, "Failed to delete partition folder '%s'.", partitionPath) - } -} - -func getItemsWorker(logger logger.Logger, container v3io.Container, input *v3io.GetItemsInput, partition *partmgr.DBPartition, - filesToDeleteChan chan<- v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}) { - for { - select { - case _ = <-onErrorTerminationChannel: - terminationChan <- nil - return - default: - } - - logger.Debug("going to getItems for partition '%v', input: %v", partition.GetTablePath(), *input) - resp, err := container.GetItemsSync(input) - if err != nil { - terminationChan <- err - return - } - resp.Release() - output := resp.Output.(*v3io.GetItemsOutput) - - for _, item := range output.Items { - item["partition"] = partition - - // In case we got error on delete while iterating getItems response - select { - case _ = <-onErrorTerminationChannel: - terminationChan <- nil - return - default: - } - - filesToDeleteChan <- item - } - if output.Last { - terminationChan <- nil - return - } - input.Marker = output.NextMarker - } -} - -func deleteObjectWorker(container v3io.Container, deleteParams *DeleteParams, logger logger.Logger, - filesToDeleteChannel <-chan v3io.Item, terminationChan chan<- error, onErrorTerminationChannel <-chan struct{}, - aggrMask aggregate.AggrType) { - for { - select { - case _ = <-onErrorTerminationChannel: - return - case itemToDelete, ok := <-filesToDeleteChannel: - if !ok { - terminationChan <- nil - return - } - - currentPartition := itemToDelete.GetField("partition").(*partmgr.DBPartition) - fileName, err := itemToDelete.GetFieldString(config.ObjectNameAttrName) - if err != nil { - terminationChan <- err - return - } - fullFileName := pathUtil.Join(currentPartition.GetTablePath(), fileName) - - // Delete whole object - if deleteParams.From <= currentPartition.GetStartTime() && - deleteParams.To >= currentPartition.GetEndTime() { - - logger.Debug("delete entire item '%v' ", fullFileName) - input := &v3io.DeleteObjectInput{Path: fullFileName} - err = container.DeleteObjectSync(input) - if err != nil && 
!utils.IsNotExistsOrConflictError(err) { - terminationChan <- err - return - } - // Delete partial object - specific chunks or sub-parts of chunks - } else { - mtimeSecs, err := itemToDelete.GetFieldInt(config.MtimeSecsAttributeName) - if err != nil { - terminationChan <- err - return - } - mtimeNSecs, err := itemToDelete.GetFieldInt(config.MtimeNSecsAttributeName) - if err != nil { - terminationChan <- err - return - } - - deleteUpdateExpression := strings.Builder{} - dataEncoding, err := getEncoding(itemToDelete) - if err != nil { - terminationChan <- err - return - } - - var aggregationsByBucket map[int]*aggregate.AggregatesList - if aggrMask != 0 { - aggregationsByBucket = make(map[int]*aggregate.AggregatesList) - aggrBuckets := currentPartition.Times2BucketRange(deleteParams.From, deleteParams.To) - for _, bucketID := range aggrBuckets { - aggregationsByBucket[bucketID] = aggregate.NewAggregatesList(aggrMask) - } - } - - var newMaxTime int64 = math.MaxInt64 - var numberOfExpressionsInUpdate int - for attributeName, value := range itemToDelete { - if strings.HasPrefix(attributeName, "_v") { - // Check whether the whole chunk attribute needed to be deleted or just part of it. - if currentPartition.IsChunkInRangeByAttr(attributeName, deleteParams.From, deleteParams.To) { - deleteUpdateExpression.WriteString("delete(") - deleteUpdateExpression.WriteString(attributeName) - deleteUpdateExpression.WriteString(");") - } else { - currentChunksMaxTime, err := generatePartialChunkDeleteExpression(logger, &deleteUpdateExpression, attributeName, - value.([]byte), dataEncoding, deleteParams, currentPartition, aggregationsByBucket) - if err != nil { - terminationChan <- err - return - } - - // We want to save the earliest max time possible - if currentChunksMaxTime < newMaxTime { - newMaxTime = currentChunksMaxTime - } - } - numberOfExpressionsInUpdate++ - } - } - - dbMaxTime := int64(itemToDelete.GetField(config.MaxTimeAttrName).(int)) - - // Update the partition's max time if needed. 
- if deleteParams.From < dbMaxTime && deleteParams.To >= dbMaxTime { - if deleteParams.From < newMaxTime { - newMaxTime = deleteParams.From - } - - deleteUpdateExpression.WriteString(fmt.Sprintf("%v=%v;", config.MaxTimeAttrName, newMaxTime)) - } - - if deleteUpdateExpression.Len() > 0 { - // If there are server aggregates, update the needed buckets - if aggrMask != 0 { - for bucket, aggregations := range aggregationsByBucket { - numberOfExpressionsInUpdate = numberOfExpressionsInUpdate + len(*aggregations) - - // Due to engine limitation, If we reached maximum number of expressions in an UpdateItem - // we need to break the update into chunks - // TODO: refactor in 2.8: - // in 2.8 there is a better way of doing it by uniting multiple update expressions into - // one expression by range in a form similar to `_v_sum[15...100]=0` - if numberOfExpressionsInUpdate < maxExpressionsInUpdateItem { - deleteUpdateExpression.WriteString(aggregations.SetExpr("v", bucket)) - } else { - exprStr := deleteUpdateExpression.String() - logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) - mtimeSecs, mtimeNSecs, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) - if err != nil { - terminationChan <- err - return - } - - // Reset stuff for next update iteration - numberOfExpressionsInUpdate = 0 - deleteUpdateExpression.Reset() - } - } - } - - // If any expressions are left, save them - if deleteUpdateExpression.Len() > 0 { - exprStr := deleteUpdateExpression.String() - logger.Debug("delete item '%v' with expression '%v'", fullFileName, exprStr) - _, _, err = sendUpdateItem(fullFileName, exprStr, mtimeSecs, mtimeNSecs, container) - if err != nil { - terminationChan <- err - return - } - } - } - } - } - } -} - -func sendUpdateItem(path, expr string, mtimeSecs, mtimeNSecs int, container v3io.Container) (int, int, error) { - condition := fmt.Sprintf("%v == %v and %v == %v", - config.MtimeSecsAttributeName, mtimeSecs, - config.MtimeNSecsAttributeName, mtimeNSecs) - - input := &v3io.UpdateItemInput{Path: path, - Expression: &expr, - Condition: condition} - - response, err := container.UpdateItemSync(input) - if err != nil && !utils.IsNotExistsOrConflictError(err) { - returnError := err - if isFalseConditionError(err) { - returnError = errors.Wrapf(err, "Item '%v' was updated while deleting occurred. Please disable any ingestion and retry.", path) - } - return 0, 0, returnError - } - - output := response.Output.(*v3io.UpdateItemOutput) - return output.MtimeSecs, output.MtimeNSecs, nil -} - -func getEncoding(itemToDelete v3io.Item) (chunkenc.Encoding, error) { - var encoding chunkenc.Encoding - encodingStr, ok := itemToDelete.GetField(config.EncodingAttrName).(string) - // If we don't have the encoding attribute, use XOR as default. 
(for backwards compatibility) - if !ok { - encoding = chunkenc.EncXOR - } else { - intEncoding, err := strconv.Atoi(encodingStr) - if err != nil { - return 0, fmt.Errorf("error parsing encoding type of chunk, got: %v, error: %v", encodingStr, err) - } else { - encoding = chunkenc.Encoding(intEncoding) - } - } - - return encoding, nil -} - -func generatePartialChunkDeleteExpression(logger logger.Logger, expr *strings.Builder, - attributeName string, value []byte, encoding chunkenc.Encoding, deleteParams *DeleteParams, - partition *partmgr.DBPartition, aggregationsByBucket map[int]*aggregate.AggregatesList) (int64, error) { - chunk, err := chunkenc.FromData(logger, encoding, value, 0) - if err != nil { - return 0, err - } - - newChunk := chunkenc.NewChunk(logger, encoding == chunkenc.EncVariant) - appender, err := newChunk.Appender() - if err != nil { - return 0, err - } - - var currentMaxTime int64 - var remainingItemsCount int - iter := chunk.Iterator() - for iter.Next() { - var t int64 - var v interface{} - if encoding == chunkenc.EncXOR { - t, v = iter.At() - } else { - t, v = iter.AtString() - } - - // Append back only events that are not in the delete range - if t < deleteParams.From || t > deleteParams.To { - remainingItemsCount++ - appender.Append(t, v) - - // Calculate server-side aggregations - if aggregationsByBucket != nil { - currentAgg, ok := aggregationsByBucket[partition.Time2Bucket(t)] - // A chunk may contain more data then needed for the aggregations, if this is the case do not aggregate - if ok { - currentAgg.Aggregate(t, v) - } - } - - // Update current chunk's new max time - if t > currentMaxTime { - currentMaxTime = t - } - } - } - - if remainingItemsCount == 0 { - expr.WriteString("delete(") - expr.WriteString(attributeName) - expr.WriteString(");") - currentMaxTime, _ = partition.GetChunkStartTimeByAttr(attributeName) - } else { - bytes := appender.Chunk().Bytes() - val := base64.StdEncoding.EncodeToString(bytes) - - expr.WriteString(fmt.Sprintf("%s=blob('%s'); ", attributeName, val)) - } - - return currentMaxTime, nil - -} - // Return the number of items in a TSDB table func (a *V3ioAdapter) CountMetrics(part string) (int, error) { count := 0 @@ -829,16 +357,3 @@ type Appender interface { Rollback() error Close() } - -// Check if the current error was caused specifically because the condition was evaluated to false. -func isFalseConditionError(err error) bool { - errString := err.Error() - - if strings.Count(errString, errorCodeString) == 2 && - strings.Contains(errString, falseConditionOuterErrorCode) && - strings.Contains(errString, falseConditionInnerErrorCode) { - return true - } - - return false -} diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go index 18aac56d..89f4fb38 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb_integration_test.go @@ -24,14 +24,20 @@ package tsdb_test import ( "encoding/json" + "fmt" + "math" + "path" "sort" + "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/partmgr" . 
"github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest" "github.com/v3io/v3io-tsdb/pkg/tsdb/tsdbtest/testutils" @@ -41,6 +47,7 @@ import ( const defaultStepMs = 5 * tsdbtest.MinuteInMillis // 5 minutes func TestIngestData(t *testing.T) { + timestamp := fmt.Sprintf("%d", time.Now().Unix()) //time.Now().Format(time.RFC3339) testCases := []struct { desc string params tsdbtest.TestParams @@ -98,6 +105,46 @@ func TestIngestData(t *testing.T) { }}}, ), }, + {desc: "Should drop values of incompatible data types (prepare data for: IG-13146)", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "IG13146", + Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), + Data: []tsdbtest.DataPoint{ + {Time: 15, Value: 0.1}, // first add float value + {Time: 20, Value: "some string value"}, // then attempt to add string value + {Time: 30, Value: 0.2}, // and finally add another float value + }, + ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), + }}}, + tsdbtest.TestOption{ + Key: "override_test_name", + Value: fmt.Sprintf("IG-13146-%s", timestamp)}), + }, + {desc: "IG-13146: Should reject values of incompatible data types without data corruption", + params: tsdbtest.NewTestParams(t, + tsdbtest.TestOption{ + Key: tsdbtest.OptTimeSeries, + Value: tsdbtest.TimeSeries{tsdbtest.Metric{ + Name: "IG13146", + Labels: utils.LabelsFromStringList("test", "IG-13146", "float", "string"), + Data: []tsdbtest.DataPoint{ + {Time: 50, Value: "another string value"}, // then attempt to add string value + {Time: 60, Value: 0.4}, // valid values from this batch will be dropped + {Time: 70, Value: 0.3}, // because processing of entire batch will stop + }, + ExpectedCount: func() *int { var expectedCount = 2; return &expectedCount }(), + }}}, + tsdbtest.TestOption{ + Key: "override_test_name", + Value: fmt.Sprintf("IG-13146-%s", timestamp)}, + tsdbtest.TestOption{ + Key: "expected_error_contains_string", + // Note, the expected error message should align with pkg/appender/ingest.go:308 + Value: "trying to ingest values of incompatible data type"}), + }, } for _, test := range testCases { @@ -140,13 +187,26 @@ func testIngestDataCase(t *testing.T, testParams tsdbtest.TestParams) { } if _, err := appender.WaitForCompletion(0); err != nil { - t.Fatalf("Failed to wait for appender completion. reason: %s", err) + if !isExpected(testParams, err) { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } } - tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, len(dp.Data), from, to, -1) + expectedCount := len(dp.Data) + if dp.ExpectedCount != nil { + expectedCount = *dp.ExpectedCount + } + tsdbtest.ValidateCountOfSamples(t, adapter, dp.Name, expectedCount, from, to, -1) } } +func isExpected(testParams tsdbtest.TestParams, actualErr error) bool { + if errMsg, ok := testParams["expected_error_contains_string"]; ok { + return strings.Contains(actualErr.Error(), fmt.Sprintf("%v", errMsg)) + } + return false +} + func TestIngestDataWithSameTimestamp(t *testing.T) { baseTime := int64(1532209200000) testParams := tsdbtest.NewTestParams(t, @@ -555,8 +615,18 @@ func testQueryDataCase(test *testing.T, testParams tsdbtest.TestParams, filter s if err != nil { test.Fatal(err) } - assert.ElementsMatch(test, expected[currentAggregate], actual, - "Check failed for aggregate='%s'. 
Query aggregates: %s", currentAggregate, queryAggregates) + + for _, data := range expected[currentAggregate] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(test, equalCount, len(expected[currentAggregate]), + "Check failed for aggregate='%s'. Query aggregates: %s", currentAggregate, queryAggregates) + } } if set.Err() != nil { @@ -682,7 +752,14 @@ func testQueryDataOverlappingWindowCase(test *testing.T, v3ioConfig *config.V3io } assert.EqualValues(test, len(windows), len(actual)) for _, data := range expected[agg] { - assert.Contains(test, actual, data) + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(test, equalCount, len(expected[agg])) } } @@ -763,7 +840,17 @@ func TestIgnoreNaNWhenSeekingAggSeries(t *testing.T) { } actual = append(actual, tsdbtest.DataPoint{Time: t1, Value: v1}) } - assert.ElementsMatch(t, expected[agg], actual) + + for _, data := range expected[agg] { + var equalCount = 0 + for _, dp := range actual { + if dp.Equals(data) { + equalCount++ + continue + } + } + assert.Equal(t, equalCount, len(expected[agg])) + } } if set.Err() != nil { @@ -836,7 +923,8 @@ func TestDeleteTSDB(t *testing.T) { t.Fatal(res.Error.Error()) } - if err := adapter.DeleteDB(DeleteParams{DeleteAll: true, IgnoreErrors: true}); err != nil { + now := time.Now().Unix() * 1000 // now time in millis + if err := adapter.DeleteDB(true, true, 0, now); err != nil { t.Fatalf("Failed to delete DB on teardown. reason: %s", err) } @@ -849,6 +937,269 @@ func TestDeleteTSDB(t *testing.T) { } } +func TestDeleteTable(t *testing.T) { + ta, _ := time.Parse(time.RFC3339, "2018-10-03T05:00:00Z") + t1 := ta.Unix() * 1000 + tb, _ := time.Parse(time.RFC3339, "2018-10-07T05:00:00Z") + t2 := tb.Unix() * 1000 + tc, _ := time.Parse(time.RFC3339, "2018-10-11T05:00:00Z") + t3 := tc.Unix() * 1000 + td, _ := time.Parse(time.RFC3339, "2025-10-11T05:00:00Z") + futurePoint := td.Unix() * 1000 + + testCases := []struct { + desc string + deleteFrom int64 + deleteTo int64 + deleteAll bool + ignoreErrors bool + data []tsdbtest.DataPoint + expected []tsdbtest.DataPoint + ignoreReason string + }{ + {desc: "Should delete all table by time", + deleteFrom: 0, + deleteTo: 9999999999999, + deleteAll: false, + ignoreErrors: true, + data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}}, + expected: []tsdbtest.DataPoint{}, + }, + {desc: "Should delete all table by deleteAll", + deleteFrom: 0, + deleteTo: 0, + deleteAll: true, + ignoreErrors: true, + data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}, + {Time: futurePoint, Value: 555.5}}, + expected: []tsdbtest.DataPoint{}, + }, + {desc: "Should skip partial partition at begining", + deleteFrom: t1 - 10000, + deleteTo: 9999999999999, + deleteAll: false, + ignoreErrors: true, + data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}}, + expected: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}}, + }, + {desc: "Should skip partial partition at end", + deleteFrom: 0, + deleteTo: t3 + 10000, + deleteAll: false, + ignoreErrors: true, + data: []tsdbtest.DataPoint{{Time: t1, Value: 222.2}, + {Time: t2, Value: 333.3}, + {Time: t3, Value: 444.4}}, + expected: []tsdbtest.DataPoint{{Time: t3, Value: 444.4}}, + }, + {desc: "Should skip partial partition at beginning and end not in range", + 
+
+func testDeleteTSDBCase(test *testing.T, testParams tsdbtest.TestParams, deleteFrom int64, deleteTo int64, ignoreErrors bool, deleteAll bool,
+	expected []tsdbtest.DataPoint) {
+
+	adapter, teardown := tsdbtest.SetUpWithData(test, testParams)
+	defer teardown()
+
+	container, err := utils.CreateContainer(adapter.GetLogger("container"), testParams.V3ioConfig(), adapter.HTTPTimeout)
+	if err != nil {
+		test.Fatalf("failed to create new container. reason: %s", err)
+	}
+	pm, err := partmgr.NewPartitionMngr(adapter.GetSchema(), container, testParams.V3ioConfig())
+	if err != nil {
+		test.Fatalf("Failed to create new partition manager. reason: %s", err)
+	}
+
+	initialPartitions := pm.PartsForRange(0, math.MaxInt64, true)
+	initialNumberOfPartitions := len(initialPartitions)
+
+	partitionsToDelete := pm.PartsForRange(deleteFrom, deleteTo, false)
+
+	if err := adapter.DeleteDB(deleteAll, ignoreErrors, deleteFrom, deleteTo); err != nil {
+		test.Fatalf("Failed to delete DB. reason: %s", err)
+	}
+
+	if !deleteAll {
+		pm1, err := partmgr.NewPartitionMngr(adapter.GetSchema(), container, testParams.V3ioConfig())
+		if err != nil {
+			test.Fatalf("Failed to create new partition manager. reason: %s", err)
+		}
+		remainingParts := pm1.PartsForRange(0, math.MaxInt64, false)
+		assert.Equal(test, len(remainingParts), initialNumberOfPartitions-len(partitionsToDelete))
+
+		qry, err := adapter.Querier(nil, 0, math.MaxInt64)
+		if err != nil {
+			test.Fatalf("Failed to create Querier. reason: %v", err)
+		}
+
+		for _, metric := range testParams.TimeSeries() {
+			set, err := qry.Select(metric.Name, "", 0, "")
+			if err != nil {
+				test.Fatalf("Failed to run Select. reason: %v", err)
+			}
+
+			set.Next()
+			if set.Err() != nil {
+				test.Fatalf("Failed to query metric. reason: %v", set.Err())
+			}
+
+			series := set.At()
+			if series == nil && len(expected) == 0 {
+				// the table is expected to be empty
+			} else if series != nil {
+				iter := series.Iterator()
+				if iter.Err() != nil {
+					test.Fatalf("Failed to query data series. reason: %v", iter.Err())
+				}
+
+				actual, err := iteratorToSlice(iter)
+				if err != nil {
+					test.Fatal(err)
+				}
+				assert.ElementsMatch(test, expected, actual)
+			} else {
+				test.Fatalf("Result series is empty while expected result set is not!")
+			}
+		}
+	} else {
+		container, tablePath := adapter.GetContainer()
+		tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName)
+
+		// Validate: schema does not exist
+		_, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath})
+		if err != nil {
+			if utils.IsNotExistsError(err) {
+				// OK - expected
+			} else {
+				test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err)
+			}
+		}
+
+		// Validate: table does not exist
+		_, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath})
+		if err != nil {
+			if utils.IsNotExistsError(err) {
+				// OK - expected
+			} else {
+				test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err)
+			}
+		}
+	}
+}
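The two ingestion tests that follow pin the type-consistency rule: a series keeps the value type of its first sample and rejects later samples of another type. A condensed sketch of that contract (error handling elided; timestamps arbitrary; cfg is assumed to be a valid *config.V3ioConfig such as the one built by the tsdbtest helpers):

    // Sketch only: mirrors TestIngestDataFloatThenString below.
    func checkTypeConsistency(t *testing.T, cfg *config.V3ioConfig) {
    	adapter, _ := tsdb.NewV3ioAdapter(cfg, nil, nil)
    	appender, _ := adapter.Appender()
    	labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}}

    	_, _ = appender.Add(labels, 1532940510000, 12.0) // float64 sample sets the series type
    	if _, err := appender.Add(labels, 1532940610000, "a string"); err == nil {
    		t.Fatal("expected the string sample to be rejected")
    	}
    }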
reason: %v", iter.Err()) + } + + actual, err := iteratorToSlice(iter) + if err != nil { + test.Fatal(err) + } + assert.ElementsMatch(test, expected, actual) + } else { + test.Fatalf("Result series is empty while expected result set is not!") + } + } + } else { + container, tablePath := adapter.GetContainer() + tableSchemaPath := path.Join(tablePath, config.SchemaConfigFileName) + + // Validate: schema does not exist + _, err := container.GetObjectSync(&v3io.GetObjectInput{Path: tableSchemaPath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tableSchemaPath, err) + } + } + + // Validate: table does not exist + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: tablePath}) + if err != nil { + if utils.IsNotExistsError(err) { + // OK - expected + } else { + test.Fatalf("Failed to read a TSDB schema from '%s'.\nError: %v", tablePath, err) + } + } + } +} + +func TestIngestDataFloatThenString(t *testing.T) { + testParams := tsdbtest.NewTestParams(t) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} + _, err = appender.Add(labels, 1532940510000, 12.0) + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + _, err = appender.Add(labels, 1532940610000, "tal") + if err == nil { + t.Fatal("expected failure but finished successfully") + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. reason: %s", err) + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) +} + +func TestIngestDataStringThenFloat(t *testing.T) { + testParams := tsdbtest.NewTestParams(t) + + defer tsdbtest.SetUp(t, testParams)() + + adapter, err := NewV3ioAdapter(testParams.V3ioConfig(), nil, nil) + if err != nil { + t.Fatalf("Failed to create v3io adapter. reason: %s", err) + } + + appender, err := adapter.Appender() + if err != nil { + t.Fatalf("Failed to get appender. reason: %s", err) + } + + labels := utils.Labels{utils.Label{Name: "__name__", Value: "cpu"}} + _, err = appender.Add(labels, 1532940510000, "tal") + if err != nil { + t.Fatalf("Failed to add data to appender. reason: %s", err) + } + + _, err = appender.Add(labels, 1532940610000, 666.0) + if err == nil { + t.Fatal("expected failure but finished successfully") + } + + if _, err := appender.WaitForCompletion(0); err != nil { + t.Fatalf("Failed to wait for appender completion. 
reason: %s", err) + } + + tsdbtest.ValidateCountOfSamples(t, adapter, "cpu", 1, 0, 1532950510000, -1) +} + func iteratorToSlice(it chunkenc.Iterator) ([]tsdbtest.DataPoint, error) { var result []tsdbtest.DataPoint for it.Next() { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go index 4a5b4124..7b1090e0 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/add.go @@ -87,7 +87,7 @@ Arguments: RunE: func(cmd *cobra.Command, args []string) error { if commandeer.inFile != "" && commandeer.stdin { - return errors.New("-f|--file and --stdin are mutually exclusive.") + return errors.New("-f|--file and --stdin are mutually exclusive") } if commandeer.inFile == "" && !commandeer.stdin { @@ -153,7 +153,7 @@ func (ac *addCommandeer) add() error { } if ac.vArr == "" { - return errors.New("The metric-samples array must have at least one value (currently empty).") + return errors.New("the metric-samples array must have at least one value (currently empty)") } tarray, varray, err := strToTV(ac.tArr, ac.vArr) @@ -275,11 +275,11 @@ func strToTV(tarr, varr string) ([]int64, []interface{}, error) { vlist := strings.Split(varr, ArraySeparator) if tarr == "" && len(vlist) > 1 { - return nil, nil, errors.New("A times array must be provided when providing a values array.") + return nil, nil, errors.New("a times array must be provided when providing a values array") } if tarr != "" && len(tlist) != len(vlist) { - return nil, nil, errors.New("The times and values arrays don't have the same amount of elements.") + return nil, nil, errors.New("the times and values arrays don't have the same amount of elements") } var tarray []int64 @@ -299,7 +299,7 @@ func strToTV(tarr, varr string) ([]int64, []interface{}, error) { } } - now := int64(time.Now().Unix() * 1000) + now := time.Now().Unix() * 1000 if tarr == "" { tarray = append(tarray, now) } else { @@ -312,7 +312,7 @@ func strToTV(tarr, varr string) ([]int64, []interface{}, error) { if err != nil { return nil, nil, errors.Wrap(err, "Failed to parse the pattern following 'now-'.") } - tarray = append(tarray, now-int64(t)) + tarray = append(tarray, now-t) } else { t, err := strconv.Atoi(tlist[i]) if err != nil { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/check.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/check.go index f7d0f3ac..52dee2e0 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/check.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/check.go @@ -70,7 +70,7 @@ Arguments: numArgs := len(args) if numArgs == 0 { - return errors.New("The check command requires a metric name.") + return errors.New("the check command requires a metric name") } if strings.Contains(args[0], "/") { @@ -274,46 +274,44 @@ func (cc *checkCommandeer) printValues(bytes []byte, encoding chunkenc.Encoding) if err != nil { cc.rootCommandeer.logger.ErrorWith("Error reading chunk buffer.", "err", err) return err - } else { - count := 0 - iter := chunk.Iterator() - for iter.Next() { - var v interface{} - var t int64 - if encoding == chunkenc.EncXOR { - t, v = iter.At() - } else { - t, v = iter.AtString() - } - - tstr := time.Unix(int64(t/1000), 0).UTC().Format(time.RFC3339) - fmt.Printf("\t\tUnix timestamp=%d, t=%s, v=%v\n", t, tstr, v) - count++ - } - if iter.Err() != nil { - return 
errors.Wrap(iter.Err(), "Failed to read the iterator.") + } + count := 0 + iter := chunk.Iterator() + for iter.Next() { + var v interface{} + var t int64 + if encoding == chunkenc.EncXOR { + t, v = iter.At() + } else { + t, v = iter.AtString() } - avgSampleSize := 0.0 - bytesCount := len(bytes) - if count > 0 { - avgSampleSize = float64(bytesCount) / float64(count) - } - fmt.Printf("Total size=%d, Count=%d, Avg sample size=%.2f\n", - bytesCount, count, avgSampleSize) + tstr := time.Unix(t/1000, 0).UTC().Format(time.RFC3339) + fmt.Printf("\t\tUnix timestamp=%d, t=%s, v=%v\n", t, tstr, v) + count++ + } + if iter.Err() != nil { + return errors.Wrap(iter.Err(), "Failed to read the iterator.") + } + + avgSampleSize := 0.0 + bytesCount := len(bytes) + if count > 0 { + avgSampleSize = float64(bytesCount) / float64(count) } + fmt.Printf("Total size=%d, Count=%d, Avg sample size=%.2f\n", + bytesCount, count, avgSampleSize) return nil } func getSchema(cfg *config.V3ioConfig, container v3io.Container) (*config.Schema, error) { - fullpath := fmt.Sprintf("%s/%s/%s", cfg.WebApiEndpoint, cfg.Container, cfg.TablePath) + fullpath := fmt.Sprintf("%s/%s/%s", cfg.WebAPIEndpoint, cfg.Container, cfg.TablePath) resp, err := container.GetObjectSync(&v3io.GetObjectInput{Path: path.Join(cfg.TablePath, config.SchemaConfigFileName)}) if err != nil { if utils.IsNotExistsError(err) { return nil, errors.Errorf("No TSDB schema file found at '%s'.", fullpath) - } else { - return nil, errors.Wrapf(err, "Failed to read a TSDB schema from '%s'.", fullpath) } + return nil, errors.Wrapf(err, "Failed to read a TSDB schema from '%s'.", fullpath) } @@ -329,7 +327,6 @@ func getEncoding(enc string) (chunkenc.Encoding, error) { intEncoding, err := strconv.Atoi(enc) if err != nil { return 0, fmt.Errorf("error parsing encoding type, encoding type should be numberic, got: %v", enc) - } else { - return chunkenc.Encoding(intEncoding), nil } + return chunkenc.Encoding(intEncoding), nil } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go index 9d33c58a..4e6898cb 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/delete.go @@ -30,21 +30,17 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/v3io/v3io-tsdb/pkg/config" - "github.com/v3io/v3io-tsdb/pkg/tsdb" "github.com/v3io/v3io-tsdb/pkg/utils" ) type delCommandeer struct { cmd *cobra.Command rootCommandeer *RootCommandeer + deleteAll bool + ignoreErrors bool force bool - - deleteAll bool - ignoreErrors bool - fromTime string - toTime string - filter string - metrics string + fromTime string + toTime string } func newDeleteCommandeer(rootCommandeer *RootCommandeer) *delCommandeer { @@ -70,9 +66,6 @@ Notes: metric items with older or newer times. Use the info command to view the partitioning interval.`, RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - return errors.New("delete does not accept unnamed arguments. Did you forget to use a flag?") - } // Initialize parameters return commandeer.delete() }, @@ -88,10 +81,6 @@ Notes: "End (maximum) time for the delete operation, as a string containing an\nRFC 3339 time string, a Unix timestamp in milliseconds, or a relative\ntime of the format \"now\" or \"now-[0-9]+[mhd]\" (where 'm' = minutes,\n'h' = hours, and 'd' = days). 
Examples: \"2018-09-26T14:10:20Z\";\n\"1537971006000\"; \"now-3h\"; \"now-7d\". (default \"now\")") cmd.Flags().StringVarP(&commandeer.fromTime, "begin", "b", "", "Start (minimum) time for the delete operation, as a string containing\nan RFC 3339 time, a Unix timestamp in milliseconds, a relative time of\nthe format \"now\" or \"now-[0-9]+[mhd]\" (where 'm' = minutes, 'h' = hours,\nand 'd' = days), or 0 for the earliest time. Examples:\n\"2016-01-02T15:34:26Z\"; \"1451748866\"; \"now-90m\"; \"0\". (default =\n - 1h)") - cmd.Flags().StringVar(&commandeer.filter, "filter", "", - "Query filter, as an Iguazio Data Science Platform\nfilter expression. \nExamples: \"method=='get'\"; \"method=='get' AND os=='win'\".") - cmd.Flags().StringVarP(&commandeer.metrics, "metrics", "m", "", - "Comma-separated list of metric names to delete. If you don't set this argument, all metrics will be deleted according to the time range and filter specified.") commandeer.cmd = cmd return commandeer @@ -135,26 +124,13 @@ func (dc *delCommandeer) delete() error { } if !confirmedByUser { - return errors.New("Delete cancelled by the user.") - } - } - - var metricsToDelete []string - if dc.metrics != "" { - for _, m := range strings.Split(dc.metrics, ",") { - metricsToDelete = append(metricsToDelete, strings.TrimSpace(m)) + return errors.New("delete cancelled by the user") } } - params := tsdb.DeleteParams{DeleteAll: dc.deleteAll, - IgnoreErrors: dc.ignoreErrors, - From: from, - To: to, - Metrics: metricsToDelete, - Filter: dc.filter} - err = dc.rootCommandeer.adapter.DeleteDB(params) + err = dc.rootCommandeer.adapter.DeleteDB(dc.deleteAll, dc.ignoreErrors, from, to) if err != nil { - return errors.Wrapf(err, "Failed to delete %s TSDB table '%s' in container '%s'.", partialMsg, dc.rootCommandeer.v3iocfg.TablePath, dc.rootCommandeer.v3iocfg.Container) + return errors.Wrapf(err, "failed to delete %s TSDB table '%s' in container '%s'", partialMsg, dc.rootCommandeer.v3iocfg.TablePath, dc.rootCommandeer.v3iocfg.Container) } fmt.Printf("Successfully deleted %s TSDB table '%s' from container '%s'.\n", partialMsg, dc.rootCommandeer.v3iocfg.TablePath, dc.rootCommandeer.v3iocfg.Container) @@ -169,7 +145,7 @@ func getConfirmation(prompt string) (bool, error) { response, err := reader.ReadString('\n') if err != nil { - errors.Wrap(err, "Failed to get user input.") + return false, errors.Wrap(err, "failed to get user input") } response = strings.ToLower(strings.TrimSpace(response)) diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/query.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/query.go index bd7f5ec6..be8997c4 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/query.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/query.go @@ -121,11 +121,11 @@ Arguments: func (qc *queryCommandeer) query() error { if qc.name == "" && qc.filter == "" { - return errors.New("The query command must receive either a metric-name paramter () or a query filter (set via the -f|--filter flag).") + return errors.New("the query command must receive either a metric-name paramter () or a query filter (set via the -f|--filter flag)") } if qc.last != "" && (qc.from != "" || qc.to != "") { - return errors.New("The -l|--last flag cannot be set together with the -b|--begin and/or -e|--end flags.") + return errors.New("the -l|--last flag cannot be set together with the -b|--begin and/or -e|--end flags") } // Initialize parameters and adapter @@ -172,9 +172,8 @@ func (qc 
*queryCommandeer) query() error { if !qc.oldQuerier { return qc.newQuery(from, to, step) - } else { - return qc.oldQuery(from, to, step) } + return qc.oldQuery(from, to, step) } func (qc *queryCommandeer) newQuery(from, to, step int64) error { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl.go index f40037d2..b8042a8d 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl.go @@ -37,7 +37,7 @@ type RootCommandeer struct { logger logger.Logger v3iocfg *config.V3ioConfig cmd *cobra.Command - v3ioUrl string + v3ioURL string dbPath string cfgFilePath string logLevel string @@ -69,7 +69,7 @@ func NewRootCommandeer() *RootCommandeer { // although it's documented as Required, because this flag isn't required // for the hidden `time` command + during internal tests we might want to // configure the table path in a configuration file. - cmd.PersistentFlags().StringVarP(&commandeer.v3ioUrl, "server", "s", "", + cmd.PersistentFlags().StringVarP(&commandeer.v3ioURL, "server", "s", "", "Web-gateway (web-APIs) service endpoint of an instance of\nthe Iguazio Data Science Platform, of the format\n\":\". Examples: \"localhost:8081\"\n(when running on the target platform); \"192.168.1.100:8081\".") cmd.PersistentFlags().StringVarP(&commandeer.cfgFilePath, "config", "g", "", "Path to a YAML TSDB configuration file. When this flag isn't\nset, the CLI checks for a "+config.DefaultConfigurationFileName+" configuration\nfile in the current directory. CLI flags override file\nconfigurations. Example: \"~/cfg/my_v3io_tsdb_cfg.yaml\".") @@ -120,9 +120,8 @@ func (rc *RootCommandeer) initialize() error { // Display an error if we fail to load a configuration file if rc.cfgFilePath == "" { return errors.Wrap(err, "Failed to load the TSDB configuration.") - } else { - return errors.Wrap(err, fmt.Sprintf("Failed to load the TSDB configuration from '%s'.", rc.cfgFilePath)) } + return errors.Wrap(err, fmt.Sprintf("Failed to load the TSDB configuration from '%s'.", rc.cfgFilePath)) } return rc.populateConfig(cfg) } @@ -144,8 +143,8 @@ func (rc *RootCommandeer) populateConfig(cfg *config.V3ioConfig) error { cfg.AccessKey = rc.accessKey } - if rc.v3ioUrl != "" { - cfg.WebApiEndpoint = rc.v3ioUrl + if rc.v3ioURL != "" { + cfg.WebAPIEndpoint = rc.v3ioURL } if rc.container != "" { cfg.Container = rc.container @@ -153,7 +152,7 @@ func (rc *RootCommandeer) populateConfig(cfg *config.V3ioConfig) error { if rc.dbPath != "" { cfg.TablePath = rc.dbPath } - if cfg.WebApiEndpoint == "" { + if cfg.WebAPIEndpoint == "" { return errors.New("web API endpoint must be set") } if cfg.Container == "" { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl_test.go index da10f93c..bb845de3 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/tsdbctl/tsdbctl_test.go @@ -37,7 +37,7 @@ type testTsdbctlSuite struct { } func (suite *testTsdbctlSuite) TestPopulateConfigWithTenant() { - rc := RootCommandeer{v3ioUrl: "localhost:80123"} + rc := RootCommandeer{v3ioURL: "localhost:80123"} cfg := &config.V3ioConfig{Username: "Vel@Odar", Password: "p455w0rd", Container: "123", TablePath: "/x/y/z"} err := rc.populateConfig(cfg) @@ -52,11 +52,11 
@@ func (suite *testTsdbctlSuite) TestPopulateConfigWithTenant() { expectedRc := RootCommandeer{ v3iocfg: cfg, - v3ioUrl: "localhost:80123", + v3ioURL: "localhost:80123", Reporter: metricReporter, } expectedCfg := &config.V3ioConfig{ - WebApiEndpoint: "localhost:80123", + WebAPIEndpoint: "localhost:80123", Container: "123", TablePath: "/x/y/z", Username: "Vel@Odar", @@ -79,12 +79,12 @@ func (suite *testTsdbctlSuite) TestContainerConfig() { suite.Require().NoError(err) defer os.Setenv("V3IO_ACCESS_KEY", oldAccessKey) - rc := RootCommandeer{v3ioUrl: "localhost:80123", container: "test", accessKey: "acce55-key"} + rc := RootCommandeer{v3ioURL: "localhost:80123", container: "test", accessKey: "acce55-key"} cfg := &config.V3ioConfig{Username: "Vel@Odar", Password: "p455w0rd", TablePath: "/x/y/z"} err = rc.populateConfig(cfg) expectedCfg := &config.V3ioConfig{ - WebApiEndpoint: "localhost:80123", + WebAPIEndpoint: "localhost:80123", Container: "test", TablePath: "/x/y/z", Username: "Vel@Odar", @@ -114,7 +114,7 @@ func (suite *testTsdbctlSuite) TestConfigFromEnvVarsAndPassword() { expectedCfg := *cfg err = rc.populateConfig(cfg) - expectedCfg.WebApiEndpoint = "host-from-env:123" + expectedCfg.WebAPIEndpoint = "host-from-env:123" expectedCfg.Container = "test" expectedCfg.TablePath = "/x/y/z" expectedCfg.Username = "Vel@Odar" @@ -142,7 +142,7 @@ func (suite *testTsdbctlSuite) TestConfigFromEnvVars() { expectedCfg := *cfg err = rc.populateConfig(cfg) - expectedCfg.WebApiEndpoint = "host-from-env:123" + expectedCfg.WebAPIEndpoint = "host-from-env:123" expectedCfg.AccessKey = "key-from-env" expectedCfg.Container = "test" expectedCfg.LogLevel = "info" diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go index 3551c821..f3ec18ad 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go @@ -189,9 +189,15 @@ func (ic *AsyncItemsCursor) processResponse() error { // until IGZ-2.0 there is a bug in Nginx regarding range-scan, the following code is a mitigation for it. 
if *conf.DisableNginxMitigation { - ic.sendNextGetItemsOld(resp) + err := ic.sendNextGetItemsOld(resp) + if err != nil { + return err + } } else { - ic.sendNextGetItemsNew(resp) + err := ic.sendNextGetItemsNew(resp) + if err != nil { + return err + } } return nil @@ -244,9 +250,8 @@ func (ic *AsyncItemsCursor) sendNextGetItemsNew(resp *v3io.Response) error { if getItemsResp.Last { ic.lastShards++ return nil - } else { - input.Marker = getItemsResp.NextMarker } + input.Marker = getItemsResp.NextMarker } } else { // set next marker diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go index c645d1e4..ef927489 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go @@ -58,7 +58,7 @@ func NewLogger(level string) (logger.Logger, error) { } func CreateContainer(logger logger.Logger, cfg *config.V3ioConfig, httpTimeout time.Duration) (v3io.Container, error) { - endpointUrl, err := buildUrl(cfg.WebApiEndpoint) + endpointURL, err := buildURL(cfg.WebAPIEndpoint) if err != nil { return nil, err } @@ -73,7 +73,7 @@ func CreateContainer(logger logger.Logger, cfg *config.V3ioConfig, httpTimeout t } session, err := context.NewSession(&v3io.NewSessionInput{ - URL: endpointUrl, + URL: endpointURL, Username: cfg.Username, Password: cfg.Password, AccessKey: cfg.AccessKey, @@ -90,16 +90,16 @@ func CreateContainer(logger logger.Logger, cfg *config.V3ioConfig, httpTimeout t return container, nil } -func buildUrl(webApiEndpoint string) (string, error) { - if !strings.HasPrefix(webApiEndpoint, "http://") && !strings.HasPrefix(webApiEndpoint, "https://") { - webApiEndpoint = "http://" + webApiEndpoint +func buildURL(webAPIEndpoint string) (string, error) { + if !strings.HasPrefix(webAPIEndpoint, "http://") && !strings.HasPrefix(webAPIEndpoint, "https://") { + webAPIEndpoint = "http://" + webAPIEndpoint } - endpointUrl, err := url.Parse(webApiEndpoint) + endpointURL, err := url.Parse(webAPIEndpoint) if err != nil { return "", err } - endpointUrl.Path = "" - return endpointUrl.String(), nil + endpointURL.Path = "" + return endpointURL.String(), nil } // Convert a V3IO blob to an integers array @@ -181,9 +181,8 @@ func respWaitLoop(comm chan int, responseChan chan *v3io.Response, timeout time. 
fmt.Println("\nResponse loop timed out.", requests, responses) done <- true return - } else { - active = false } + active = false } } }() diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/labels.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/labels.go index 7b7d6142..9503aed8 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/labels.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/labels.go @@ -68,7 +68,7 @@ func (ls Labels) GetKey() (string, string, uint64) { key := "" name := "" for _, lbl := range ls { - if lbl.Name == "__name__" { + if lbl.Name == MetricName { name = lbl.Value } else { key = key + lbl.Name + "=" + lbl.Value + "," @@ -85,7 +85,7 @@ func (ls Labels) GetKey() (string, string, uint64) { func (ls Labels) GetExpr() string { lblexpr := "" for _, lbl := range ls { - if lbl.Name != "__name__" { + if lbl.Name != MetricName { lblexpr = lblexpr + fmt.Sprintf("%s='%s'; ", lbl.Name, lbl.Value) } else { lblexpr = lblexpr + fmt.Sprintf("_name='%s'; ", lbl.Value) @@ -143,7 +143,7 @@ func (ls *Labels) UnmarshalJSON(b []byte) error { } // Hash returns a hash value for the label set. -func (ls Labels) HashWithMetricName() uint64 { +func (ls Labels) HashWithMetricName() (uint64, error) { b := make([]byte, 0, 1024) for _, v := range ls { @@ -154,8 +154,11 @@ func (ls Labels) HashWithMetricName() uint64 { } hash := xxhash.New() - hash.Write(b) - return hash.Sum64() + _, err := hash.Write(b) + if err != nil { + return 0, err + } + return hash.Sum64(), nil } // Hash returns a hash value for the label set. @@ -163,7 +166,7 @@ func (ls Labels) Hash() uint64 { b := make([]byte, 0, 1024) for _, v := range ls { - if v.Name == "__name__" { + if v.Name == MetricName { continue } b = append(b, v.Name...) @@ -173,7 +176,10 @@ func (ls Labels) Hash() uint64 { } hash := xxhash.New() - hash.Write(b) + _, err := hash.Write(b) + if err != nil { + return 0 + } return hash.Sum64() } @@ -288,7 +294,7 @@ func LabelsFromString(lbls string) (Labels, error) { for _, l := range splitLset { splitLbl := strings.Split(l, "=") if len(splitLbl) != 2 { - return nil, errors.New("Labels must be in the form 'key1=label1[,key2=label2,...]'.") + return nil, errors.New("labels must be in the form 'key1=label1[,key2=label2,...]'") } if err := IsValidLabelName(splitLbl[0]); err != nil { diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go index cf34e8db..e7f32bbb 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go @@ -37,16 +37,21 @@ func IsNotExistsError(err error) bool { return false } -func IsNotExistsOrConflictError(err error) bool { - errorWithStatusCode, ok := err.(v3ioerrors.ErrorWithStatusCode) - if !ok { - // error of different type - return false - } - statusCode := errorWithStatusCode.StatusCode() - // Ignore 404s and 409s - if statusCode == http.StatusNotFound || statusCode == http.StatusConflict { +const ( + errorCodeString = "ErrorCode" + falseConditionOuterErrorCode = "16777244" + falseConditionInnerErrorCode = "16777245" +) + +// Check if the current error was caused specifically because the condition was evaluated to false. 
+func IsFalseConditionError(err error) bool { + errString := err.Error() + + if strings.Count(errString, errorCodeString) == 2 && + strings.Contains(errString, falseConditionOuterErrorCode) && + strings.Contains(errString, falseConditionInnerErrorCode) { return true } + return false } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/timeutils.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/timeutils.go index f27abb99..580025a7 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/timeutils.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/timeutils.go @@ -85,9 +85,9 @@ func Str2unixTime(timeString string) (int64, error) { return 0, errors.Wrap(err, "Could not parse the pattern following 'now-'.") } if sign == "-" { - return CurrentTimeInMillis() - int64(t), nil + return CurrentTimeInMillis() - t, nil } else if sign == "+" { - return CurrentTimeInMillis() + int64(t), nil + return CurrentTimeInMillis() + t, nil } else { return 0, errors.Wrapf(err, "Unsupported time format: %s", timeString) } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/validators.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/validators.go index 0f6bcc18..05abce58 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/validators.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/pkg/utils/validators.go @@ -22,8 +22,8 @@ func IsValidMetricName(name string) error { } if !metricNameValidationRegex.Match([]byte(trimmed)) { - return errors.New(fmt.Sprintf("metric name containes illegal characters. Name should conform to '%s'", - metricNameValidationRegexStr)) + return fmt.Errorf("metric name contains illegal characters. Name should conform to '%s'", + metricNameValidationRegexStr) } return nil @@ -36,8 +36,8 @@ func IsValidLabelName(labelName string) error { } if !labelValidationRegex.Match([]byte(trimmed)) { - return errors.New(fmt.Sprintf("label name containes illegal characters. Label name should conform to '%s'", - labelValidationRegexStr)) + return fmt.Errorf("label name contains illegal characters. 
Label name should conform to '%s'", + labelValidationRegexStr) } return nil diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngestWithNuclio_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngestWithNuclio_test.go index 2637ffb2..ef4892d8 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngestWithNuclio_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngestWithNuclio_test.go @@ -50,7 +50,7 @@ func BenchmarkIngestWithNuclio(b *testing.B) { data := nutest.DataBind{ Name: defaultDbName, - Url: v3ioConfig.WebApiEndpoint, + Url: v3ioConfig.WebAPIEndpoint, Container: v3ioConfig.Container, User: v3ioConfig.Username, Password: v3ioConfig.Password, diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go index dc0fa165..694b5b2a 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/test/benchmark/BenchmarkIngest_test.go @@ -157,7 +157,19 @@ func BenchmarkIngest(b *testing.B) { } func isValidDataPoint(prev, current *tsdbtest.DataPoint) bool { - return int64(current.Value)-int64(prev.Value) == 1 && current.Time > prev.Time + if current.Time > prev.Time { + switch cv := current.Value.(type) { + case float64: + if pv, ok := prev.Value.(float64); ok { + return int64(cv)-int64(pv) == 1 + } + case string: + return true + default: + return false + } + } + return false } func runTest( diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/.gitignore b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/.gitignore new file mode 100644 index 00000000..7b4ccb1d --- /dev/null +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/.gitignore @@ -0,0 +1,150 @@ +# Created by .ignore support plugin (hsz.mobi) +### Go template +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Jetbrains project settings +.idea/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Jenkinsfile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Jenkinsfile index f8e91bdb..92e407a6 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Jenkinsfile +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Jenkinsfile @@ -105,7 +105,7 @@ podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang-p parallel( 'upload linux binaries': { container('jnlp') { - github.upload_asset(git_project, git_project_user, "framesd-${github.TAG_VERSION}-linux-amd64", RELEASE_ID, GIT_TOKEN) + github.upload_asset(git_project, git_project_user, "framesd-${github.TAG_VERSION}-linux-amd64", RELEASE_ID, GIT_TOKEN, "${github.BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") } }, 'upload linux binaries artifactory': { @@ -113,18 +113,18 @@ podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang-p withCredentials([ string(credentialsId: pipelinex.PackagesRepo.ARTIFACTORY_IGUAZIO[2], variable: 'PACKAGES_ARTIFACTORY_PASSWORD') ]) { - common.upload_file_to_artifactory(pipelinex.PackagesRepo.ARTIFACTORY_IGUAZIO[0], pipelinex.PackagesRepo.ARTIFACTORY_IGUAZIO[1], PACKAGES_ARTIFACTORY_PASSWORD, "iguazio-devops/k8s", "framesd-${github.TAG_VERSION}-linux-amd64") + common.upload_file_to_artifactory(pipelinex.PackagesRepo.ARTIFACTORY_IGUAZIO[0], pipelinex.PackagesRepo.ARTIFACTORY_IGUAZIO[1], PACKAGES_ARTIFACTORY_PASSWORD, "iguazio-devops/k8s", "framesd-${github.TAG_VERSION}-linux-amd64", "${github.BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") } } }, 'upload darwin binaries': { container('jnlp') { - github.upload_asset(git_project, git_project_user, "framesd-${github.TAG_VERSION}-darwin-amd64", RELEASE_ID, GIT_TOKEN) + github.upload_asset(git_project, git_project_user, "framesd-${github.TAG_VERSION}-darwin-amd64", RELEASE_ID, GIT_TOKEN, 
"${github.BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") } }, 'upload windows binaries': { container('jnlp') { - github.upload_asset(git_project, git_project_user, "framesd-${github.TAG_VERSION}-windows-amd64", RELEASE_ID, GIT_TOKEN) + github.upload_asset(git_project, git_project_user, "framesd-${github.TAG_VERSION}-windows-amd64", RELEASE_ID, GIT_TOKEN, "${github.BUILD_FOLDER}/src/github.com/${git_project_upstream_user}/${git_project}") } }, 'upload to pypi': { @@ -144,7 +144,9 @@ podTemplate(label: "${git_project}-${label}", inheritFrom: "jnlp-docker-golang-p try { common.shellc("TRAVIS_REPO_SLUG=v3io/frames V3IO_PYPI_USER=${V3IO_PYPI_USER} V3IO_PYPI_PASSWORD=${V3IO_PYPI_PASSWORD} TRAVIS_TAG=${FRAMES_PYPI_VERSION} make pypi") } catch (err) { - echo "Can not upload to pypi" + unstable("Failed uploading to pypi") + // Do not continue stages + throw err } } } diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Makefile b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Makefile index 3abb6ae2..c4b02876 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Makefile +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/Makefile @@ -1,7 +1,7 @@ FRAMES_TAG ?= latest FRAMES_REPOSITORY ?= iguazio/ FRAMES_PATH ?= src/github.com/v3io/frames -FRAMES_BUILD_COMMAND ?= GO111MODULE=on go build -o $(GOPATH)/bin/framesd-$(FRAMES_TAG)-$(GOOS)-$(GOARCH) -ldflags "-X main.Version=$(FRAMES_TAG)" ./cmd/framesd +FRAMES_BUILD_COMMAND ?= GO111MODULE=on go build -o framesd-$(FRAMES_TAG)-$(GOOS)-$(GOARCH) -ldflags "-X main.Version=$(FRAMES_TAG)" ./cmd/framesd .PHONY: build build: @@ -11,6 +11,13 @@ build: --tag $(FRAMES_REPOSITORY)frames:$(FRAMES_TAG) \ . +build-framulate: + docker build \ + --build-arg FRAMES_VERSION=$(FRAMES_TAG) \ + --file cmd/framulate/Dockerfile \ + --tag $(FRAMES_REPOSITORY)framulate:$(FRAMES_TAG) \ + . + .PHONY: test test: test-go test-py @@ -94,7 +101,7 @@ bench-py: ./_scripts/py_benchmark.py .PHONY: frames-bin -frames-bin: ensure-gopath +frames-bin: $(FRAMES_BUILD_COMMAND) .PHONY: frames @@ -108,9 +115,3 @@ frames: --env FRAMES_TAG=$(FRAMES_TAG) \ golang:1.12 \ make frames-bin - -.PHONY: ensure-gopath -ensure-gopath: -ifndef GOPATH - $(error GOPATH must be set) -endif diff --git a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/README.md b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/README.md index 8463a830..e43926c4 100644 --- a/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/README.md +++ b/functions/ingest/vendor/github.com/v3io/v3io-tsdb/vendor/github.com/v3io/frames/README.md @@ -3,7 +3,10 @@ [![GoDoc](https://godoc.org/github.com/v3io/frames?status.svg)](https://godoc.org/github.com/v3io/frames) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -V3IO Frames (**"Frames"**) is a multi-model open-source data-access library, developed by Iguazio, which provides a unified high-performance DataFrame API for working with data in the data store of the [Iguazio Data Science Platform](https://www.iguazio.com) (**"the platform"**). +V3IO Frames (**"Frames"**) is a multi-model open-source data-access library that provides a unified high-performance DataFrame API for working with different types of data sources (backends). 
+The library was developed by Iguazio to simplify working with data in the [Iguazio Data Science Platform](https://www.iguazio.com) (**"the platform"**), but it can be extended to support additional backend types. + +> **Note:** For a full API reference of the Frames platform backends, including detailed examples, see the Frames API reference in [the platform documentation](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/). #### In This Document @@ -27,10 +30,16 @@ V3IO Frames (**"Frames"**) is a multi-model open-source data-access library, dev ### Overview +- [Python Version](#python-version) - [Initialization](#initialization) - [Backend Types](#backend-types) - [`Client` Methods](#client-methods) + +#### Python Version + +The current version of Frames supports Python 3.6 and 3.7. + #### Initialization @@ -46,24 +55,29 @@ You can then use the client methods to perform different data operations on the #### Backend Types All Frames client methods receive a [`backend`](#client-method-param-backend) parameter for setting the Frames backend type. -Frames supports the following backend types: - -- `kv` — a platform NoSQL (key/value) table. -- `stream` — a platform data stream. +Frames currently supports the following backend types: + +- `nosql` | `kv` — a platform NoSQL (key/value) table. + See the [platform NoSQL backend API reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/nosql/). +

+ > **Note:** The documentation uses the `"nosql"` alias for the `"kv"` type, which was added in Frames v0.6.10-v0.9.13; `"kv"` is still supported for backwards compatibility with earlier releases. +- `stream` — a platform data stream **[Tech Preview]**. + See the [platform streaming backend API reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/stream/). - `tsdb` — a time-series database (TSDB). + See the [platform TSDB backend API reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/tsdb/). - `csv` — a comma-separated-value (CSV) file. This backend type is used only for testing purposes. #### `Client` Methods -The `Client` class features the following methods for supporting basic data operations: +The `Client` class features the following methods for supporting operations on a data **collection**, such as a NoSQL or TSDB table or a data stream: -- [`create`](#method-create) — creates a new TSDB table or stream ("backend data"). -- [`delete`](#method-delete) — deletes a table or stream or specific table items. -- [`read`](#method-read) — reads data from a table or stream into pandas DataFrames. -- [`write`](#method-write) — writes data from pandas DataFrames to a table or stream. -- [`execute`](#method-execute) — executes a backend-specific command on a table or stream. +- [`create`](#method-create) — creates a new collection. +- [`delete`](#method-delete) — deletes a collection or specific items of the collection. +- [`read`](#method-read) — reads data from a collection into pandas DataFrames. +- [`write`](#method-write) — writes data from pandas DataFrames to a collection. +- [`execute`](#method-execute) — executes a backend-specific command on a collection. Each backend may support multiple commands. > **Note:** Some methods or method parameters are backend-specific, as detailed in this reference. @@ -71,28 +85,18 @@ The `Client` class features the following methods for supporting basic data oper ### User Authentication -When creating a Frames client, you must provide valid platform credentials for accessing the backend data, which Frames will use to identify the identity of the user. -This can be done by using any of the following alternative methods (documented in order of precedence): - -- Provide the authentication credentials in the [`Client` constructor parameters](#client-constructor-parameters) by using either of the following methods: +When creating a Frames client, you must provide valid credentials for accessing the backend data, which Frames will use to verify the user's identity. +This can be done by using any of the following alternative methods (documented in order of precedence). +For more information about user authentication for the platform backends, see the [platform documentation](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/overview/#user-authentication): - - Set the [`token`](#client-param-token) constructor parameter to a valid platform access key with the required data-access permissions. - You can get the access key from the **Access Keys** window that's available from the user-profile menu of the platform dashboard, or by copying the value of the `V3IO_ACCESS_KEY` environment variable in a platform web-shell or Jupyter Notebook service. - - Set the [`user`](#client-param-user) and [`password`](#client-param-password) constructor parameters to the username and password of a platform user with the required data-access permissions.
+- Provide the authentication credentials in the call to the [`Client` constructor](#client-constructor) — either by setting the [`token`](#client-param-token) parameter to a valid authentication token (access key) or by setting the [`user`](#client-param-user) and [`password`](#client-param-password) parameters to a username and password. + Note that you cannot set the token parameter concurrently with the username and password parameters. - > **Note:** You cannot use both methods concurrently: setting both the `token` and `user` and `password` parameters in the same constructor call will produce an error. - -- Set the authentication credentials in environment variables, by using either of the following methods: - - - Set the `V3IO_ACCESS_KEY` environment variable to a valid platform access key with the required data-access permissions. - - > **Note:** The platform's Jupyter Notebook service automatically defines the `V3IO_ACCESS_KEY` environment variable and initializes it to a valid access key for the running user of the service. - - Set the `V3IO_USERNAME` and `V3IO_PASSWORD` environment variables to the username and password of a platform user with the required data-access permissions. +- Provide the authentication credentials in environment variables — either by setting the `V3IO_ACCESS_KEY` variable to an authentication token or by setting the `V3IO_USERNAME` and `V3IO_PASSWORD` variables to a username and password. > **Note:** - > - When the client constructor is called with authentication parameters ([option #1](#user-auth-client-const-params)), the authentication-credentials environment variables (if defined) are ignored. > - When `V3IO_ACCESS_KEY` is defined, `V3IO_USERNAME` and `V3IO_PASSWORD` are ignored. + > - When the client constructor is called with authentication parameters (option #1), the authentication-credentials environment variables (if defined) are ignored. ### `Client` Constructor @@ -115,32 +119,25 @@ Client(address=""[, data_url=""], container=""[, user="", password="", token=""] #### Parameters and Data Members - **address** — The address of the Frames service (`framesd`). -
- When running locally on the platform (for example, from a Jupyter Notebook service), set this parameter to `framesd:8081` to use the gRPC (recommended) or to `framesd:8080` to use HTTP. -
- When connecting to the platform remotely, set this parameter to the API address of a Frames platform service in the parent tenant. - You can copy this address from the **API** column of the V3IO Frames service on the **Services** platform dashboard page. - + Use the `grpc://` prefix for gRPC (default; recommended) or the `http://` prefix for HTTP. + When running locally on the platform, set this parameter to `framesd:8081` to use the gRPC (recommended) or to `framesd:8080` to use HTTP; for more information, see the [platform documentation](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/client-constructor/). - **Type:** `str` - **Requirement:** Required - **data_url** — A web-API base URL for accessing the backend data. - By default, the client uses the data URL that's configured for the Frames service, which is typically the HTTPS URL of the web-APIs service of the parent platform tenant. + By default, the client uses the data URL that's configured for the Frames service; for the platform backends, this is typically the HTTPS URL of the web-APIs service of the parent tenant. - **Type:** `str` - **Requirement:** Optional -- **container** — The name of the platform data container that contains the backend data. +- **container** — The name of the data container that contains the backend data. For example, `"bigdata"` or `"users"`. - **Type:** `str` - **Requirement:** Required -- **user** — The username of a platform user with permissions to access the backend data. +- **user** — The username of a user with permissions to access the backend data. See [User Authentication](#user-authentication). - **Type:** `str` @@ -148,13 +145,13 @@ Client(address=""[, data_url=""], container=""[, user="", password="", token=""]
When the `user` parameter is set, the [`password`](#client-param-password) parameter must also be set to a matching user password. -- **password** — A platform password for the user configured in the [`user`](#client-param-user) parameter. +- **password** — A valid password for the user configured in the [`user`](#client-param-user) parameter. See [User Authentication](#user-authentication). - **Type:** `str` - **Requirement:** Required when the [`user`](#client-param-user) parameter is set. -- **token** — A valid platform access key that allows access to the backend data. +- **token** — A valid token that allows access to the backend data, such as a platform access key for the platform backends. See [User Authentication](#user-authentication). - **Type:** `str` @@ -168,7 +165,7 @@ Returns a new Frames `Client` data object. #### Examples -The following examples, for local platform execution, both create a Frames client for accessing data in the "users" container by using the authentication credentials of user "iguazio"; the first example uses access-key authentication while the second example uses username and password authentication (see [User Authentication](#user-authentication)): +The following examples, for local platform execution, both create a Frames client for accessing data in the "users" container by using the authentication credentials of user "iguazio"; the first example uses token (access-key) authentication while the second example uses username and password authentication (see [User Authentication](#user-authentication)): ```python import v3io_frames as v3f @@ -190,10 +187,10 @@ All client methods receive the following common parameters; additional, method-s - **Type:** `str` - **Requirement:** Required - - **Valid Values:** `"kv"` | `"stream"` | `"tsdb"` | `"csv"` (for testing) + - **Valid Values:** `"nosql"` | `"stream"` | `"tsdb"` | `"csv"` (for testing) -- **table** — The relative path to the backend data — a directory in the target platform data container (as configured for the client object) that represents a TSDB or NoSQL table or a data stream. - For example, `"mytable"` or `"examples/tsdb/my_metrics"`. +- **table** — The relative path to a data collection of the specified backend type in the target data container (as configured for the client object). + For example, `"mytable"` or `"/examples/tsdb/my_metrics"`. - **Type:** `str` - **Requirement:** Required unless otherwise specified in the method-specific documentation @@ -201,9 +198,9 @@ All client methods receive the following common parameters; additional, method-s ### `create` Method -Creates a new TSDB table or stream in a platform data container, according to the specified backend type. +Creates a new data collection in the configured client data container, according to the specified backend type. -The `create` method is supported by the `tsdb` and `stream` backends, but not by the `kv` backend, because NoSQL tables in the platform don't need to be created prior to ingestion; when ingesting data into a table that doesn't exist, the table is automatically created. +> **Note:** The `create` method isn't applicable to the `nosql` backend, because NoSQL tables in the platform don't need to be created prior to ingestion; when ingesting data into a table that doesn't exist, the table is automatically created. 
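A minimal sketch of this implicit creation on write (the service address, access key, and table path below are hypothetical):

```python
import pandas as pd
import v3io_frames as v3f

client = v3f.Client("framesd:8081", container="users", token="my-access-key")

# No prior create() call; the NoSQL table is created automatically on first write.
df = pd.DataFrame({"color": ["blue", "red"]}, index=["item1", "item2"])
client.write("nosql", table="examples/mytable", dfs=df)
```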
- [Syntax](#method-create-syntax) - [Common parameters](#method-create-common-params) - [`tsdb` backend `create` parameters](#method-create-params-tsdb) - [`stream` backend `create` parameters](#method-create-params-stream) - [Examples](#method-create-examples) #### Syntax ```python -create(backend, table, attrs=None, schema=None, if_exists=FAIL) +create(backend, table, schema=None, if_exists=FAIL, **kw) ``` - #### Common `create` Parameters All Frames backends that support the `create` method support the following common parameters: -- **attrs** — A dictionary of `<key>: <value>` pairs for passing additional backend-specific parameters (arguments). +- **if_exists** — Determines whether to raise an error when the specified collection ([`table`](#client-method-param-table)) already exists. - - **Type:** `dict` - - **Requirement:** Required for the `tsdb` backend; optional otherwise - - **Valid Values:** The valid values are backend-specific. - See [tsdb Backend create Parameters](#method-create-params-tsdb) and [stream Backend create Parameters](#method-create-params-stream). + - **Type:** `pb.ErrorOptions` enumeration. + To use the enumeration, import the `frames_pb2` module; for example: +
 
+ ```python + from v3io_frames import frames_pb2 as fpb + ``` + - **Requirement:** Optional + - **Valid Values:** `FAIL` to raise an error when the specified collection already exists; `IGNORE` to skip the creation without raising an error + - **Default Value:** `FAIL` + +- **schema** — A schema for describing unstructured collection data. + This parameter is intended to be used only for testing purposes with the `csv` backend. + + - **Type:** Backend-specific or `None` + - **Requirement:** Optional - **Default Value:** `None` +- **kw** — This parameter is used for passing a variable-length list of additional keyword (named) arguments. + For more information, see the backend-specific method parameters. + + - **Type:** `**` — variable-length keyword arguments list + - **Requirement:** Optional + #### `tsdb` Backend `create` Parameters -The following `create` parameters are specific to the `tsdb` backend and are passed via the method's [`attrs`](#method-create-param-attrs) parameter; for more information about these parameters, see the [V3IO TSDB documentation](https://github.com/v3io/v3io-tsdb#v3io-tsdb): +The following `create` parameters are specific to the `tsdb` backend and are passed as keyword arguments via the `kw` parameter; for more information and examples, see the platform's [Frames TSDB-backend reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/tsdb/create/): -- **rate** — The ingestion rate of the TSDB metric samples. - It's recommended that you set the rate to the average expected ingestion rate, and that the ingestion rates for a given TSDB table don't vary significantly; when there's a big difference in the ingestion rates (for example, x10), use separate TSDB tables. +- **rate** — metric-samples ingestion rate. - **Type:** `str` - **Requirement:** Required - **Valid Values:** A string of the format `"[0-9]+/[smh]"` — where '`s`' = seconds, '`m`' = minutes, and '`h`' = hours. For example, `"1/s"` (one sample per second), `"20/m"` (20 samples per minute), or `"50/h"` (50 samples per hour). -- **aggregates** — A list of aggregation functions for executing in real time during the samples ingestion ("pre-aggregation"). +- **aggregates** — A list of aggregation functions for real-time aggregation during the samples ingestion ("pre-aggregation"). - **Type:** `str` - **Requirement:** Optional - **Valid Values:** A string containing a comma-separated list of supported aggregation functions — `avg`| `count`| `last`| `max`| `min`| `rate`| `stddev`| `stdvar`| `sum`. For example, `"count,avg,min,max"`. -- **aggregation-granularity** — Aggregation granularity; i.e., a time interval for applying pre-aggregation functions, if configured in the [`aggregates`](#method-create-tsdb-param-aggregates) parameter. +- **aggregation_granularity** — Aggregation granularity; applicable when the [`aggregates`](#method-create-tsdb-param-aggregates) parameter is set. 
- **Type:** `str` - **Requirement:** Optional @@ -264,7 +275,7 @@ The following `create` parameters are specific to the `tsdb` backend and are pas #### `stream` Backend `create` Parameters -The following `create` parameters are specific to the `stream` backend and are passed via the method's [`attrs`](#method-create-param-attrs) parameter; for more information about these parameters, see the [platform's streams documentation](https://www.iguazio.com/docs/concepts/latest-release/streams): +The following `create` parameters are specific to the `stream` backend and are passed as keyword arguments via the `kw` parameter; for more information and examples, see the platform's [Frames streaming-backend reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/stream/create/): - **shards** — The number of stream shards to create. @@ -288,44 +299,33 @@ The following `create` parameters are specific to the `stream` backend and are p ##### `tsdb` Backend -- Create a TSDB table named "mytable" in the root directory of the client's data container with an ingestion rate of 10 samples per minute: - - ```python - client.create("tsdb", "/mytable", attrs={"rate": "10/m"}) - ``` - -- Create a TSDB table named "my_metrics" in a **tsdb** directory in the client's data container with an ingestion rate of 1 sample per second. - The table is created with the `count`, `avg`, `min`, and `max` aggregates and an aggregation granularity of 1 hour: +```python +client.create("tsdb", table="mytsdb", rate="10/m") +``` - ```python - client.create("tsdb", "/tsdb/my_metrics", attrs={"rate": "1/s", "aggregates": "count,avg,min,max", "aggregation-granularity": "1h"}) - ``` +```python +client.create("tsdb", table="/tsdb/my_metrics", rate="1/s", aggregates="count,avg,min,max", aggregation_granularity="1h") +``` ##### `stream` Backend -- Create a stream named "mystream" in the root directory of the client's data container. - The stream has 6 shards and a retention period of 1 hour (default): - - ```python - client.create("stream", "/mystream", attrs={"shards": 6}) - ``` - -- Create a stream named "stream1" in a "my_streams" directory in the client's data container. - The stream has 24 shards (default) and a retention period of 2 hours: +```python +client.create("stream", table="/mystream", shards=3) +``` - ```python - client.create("stream", "my_streams/stream1", attrs={"retention_hours": 2}) - ``` +```python +client.create("stream", table="/my_streams/stream1", retention_hours=2) +``` ### `write` Method -Writes data from a DataFrame to a table or stream in a platform data container, according to the specified backend type. +Writes data from a DataFrame to a data collection, according to the specified backend type. - [Syntax](#method-write-syntax) - [Common parameters](#method-write-common-params) -- [`kv` backend `write` parameters](#method-write-params-kv) +- [`nosql` backend `write` parameters](#method-write-params-nosql) - [`tsdb` backend `write` parameters](#method-write-params-tsdb) - [Examples](#method-write-examples) @@ -334,7 +334,8 @@ Writes data from a DataFrame to a table or stream in a platform data container, ```python write(backend, table, dfs, expression='', condition='', labels=None, - max_in_message=0, index_cols=None, partition_keys=None) + max_rows_in_msg=0, index_cols=None, save_mode='createNewItemsOnly', + partition_keys=None): ``` > **Note:** The `expression` and `partition_keys` parameters aren't supported in the current release. 
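To make the `write` syntax above concrete, here is a minimal sketch of a `tsdb` write (the service address, access key, table path, metric name, and label values are hypothetical):

```python
import pandas as pd
import v3io_frames as v3f

client = v3f.Client("framesd:8081", container="users", token="my-access-key")

# One time-type index column (the sample times) and one metric column ("cpu").
times = pd.date_range("2020-02-19", periods=3, freq="1min")
df = pd.DataFrame({"cpu": [12.0, 13.5, 11.8]}, index=times)

# "labels" attaches the same metric labels to all of the written samples.
client.write("tsdb", table="tsdb/my_metrics", dfs=df, labels={"os": "linux"})
```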
@@ -345,23 +346,44 @@ write(backend, table, dfs, expression='', condition='', labels=None, All Frames backends that support the `write` method support the following common parameters: -- **dfs** (Required) — A single DataFrame, a list of DataFrames, or a DataFrames iterator — One or more DataFrames containing the data to write. - (See the [`tsdb` backend-specific parameters](#method-write-tsdb-param-dfs).) -- **index_cols** (Optional) (default: `None`) — `[]str` — A list of column (attribute) names to be used as index columns for the write operation, regardless of any index-column definitions in the DataFrame. +- **dfs** — One or more DataFrames containing the data to write. + + - **Type:** A single DataFrame, a list of DataFrames, or a DataFrames iterator + - **Requirement:** Required + +- **index_cols** — A list of column (attribute) names to be used as index columns for the write operation, regardless of any index-column definitions in the DataFrame. By default, the DataFrame's index columns are used.
> **Note:** The significance and supported number of index columns is backend specific. - > For example, the `kv` backend supports only a single index column for the primary-key item attribute, while the `tsdb` backend supports additional index columns for metric labels. -- **labels** (Optional) — This parameter is currently applicable only to the `tsdb` backend (although it's available for all backends) and is therefore documented as part of the `write` method's [`tsdb` backend parameters](#method-write-tsdb-param-labels). -- **max_in_message** (Optional) (default: `0`) + > For example, the `nosql` backend supports only a single index column for the primary-key item attribute, while the `tsdb` backend supports additional index columns for metric labels. + + - **Type:** `[]str` + - **Requirement:** Optional + - **Default Value:** `None` + +- **labels** — This parameter is currently applicable only to the `tsdb` backend (although it's available for all backends) and is therefore documented as part of the `write` method's [`tsdb` backend parameters](#method-write-tsdb-param-labels). + + - **Type:** `dict` + - **Requirement:** Optional + +- **save_mode** — This parameter is currently applicable only to the `nosql` backend, and is therefore documented as part of the `write` method's [`nosql` backend parameters](#method-write-nosql-param-save_mode). - -#### `kv` Backend `write` Parameters + - **Type:** `str` + - **Requirement:** Optional + +- **max_rows_in_msg** — Maximum number of rows to write in each message (write chunk size). + + - **Type:** `int` + - **Requirement:** Optional + - **Default Value:** `0` + + +#### `nosql` Backend `write` Parameters -The following `write` parameters are specific to the `kv` backend; for more information about these parameters, see the platform's NoSQL documentation: +The following `write` parameters are specific to the `nosql` backend; for more information and examples, see the platform's [Frames NoSQL-backend reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/nosql/write/): -- **condition** (Optional) (default: `None`) — A platform condition expression that defines conditions for performing the write operation. - For detailed information about platform condition expressions, see the [platform documentation](https://www.iguazio.com/docs/reference/latest-release/expressions/condition-expression/). +- **condition** — A platform condition expression that defines conditions for performing the write operation. - -#### `tsdb` Backend `write` Parameters + - **Type:** `str` + - **Requirement:** Optional -The following `write` parameter descriptions are specific to the `tsdb` backend; for more information about these parameters, see the [V3IO TSDB documentation](https://github.com/v3io/v3io-tsdb#v3io-tsdb): +- **save_mode** — Save mode, which determines in which circumstances to write new item to the table. -- **dfs** (Required) — A single DataFrame, a list of DataFrames, or a DataFrames iterator — One or more DataFrames containing the data to write. - This is a common `write` parameter, but the following information is specific to the `tsdb` backend: + - **Type:** `str` + - **Requirement:** Optional + - **Valid Values:** + - `createNewItemsOnly` — write only new items; don't replace or update any existing table item with the same name (primary-key attribute value) as a written item. + - `"updateItem"` — update items; add new items and update the attributes of existing table items. 
+ - `"overwriteItem"` — overwrite items; add new items and replace any existing table item with the same name as a written item. + - `"errorIfTableExists"` — create a new table only; only write items if the target table doesn't already exist. + - `"overwriteTable"` — overwrite the table; replace all existing table items (if any) with the written items. + - **Default Value:** `createNewItemsOnly` + + +#### `tsdb` Backend `write` Parameters - - You must define one or more non-index DataFrame columns that represent the sample metrics; the name of the column is the metric name and its values is the sample data (i.e., the ingested metric). - - You must define a single index column whose value is the sample time of the data. - This column serves as the table's primary-key attribute. - Note that a TSDB DataFrame cannot have more than one index column of a time data type. - - You can optionally define string index columns that represent metric labels for the current DataFrame row. - Note that you can also define labels for all DataFrame rows by using the [`labels`](#method-write-tsdb-param-labels) parameter (in addition or instead of using column indexes to apply labels to a specific row). +The following `write` parameter descriptions are specific to the `tsdb` backend; for more information and examples, see the platform's [Frames TSDB-backend reference](https://www.iguazio.com/docs/reference/latest-release/api-reference/frames/tsdb/write/): -- **labels** (Optional) (default: `None`) — `dict` — A dictionary of metric labels of the format `{